"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class lowercase_ ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCAmelCase : str ):
super().__init__()
_A = model
_A = 2
_A = nn.Linear(self.model.config.hidden_size , self.num_labels )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
def _snake_case ( _snake_case : str , _snake_case : str , _snake_case : str ) -> str:
'''simple docstring'''
_A = LongformerModel.from_pretrained(UpperCamelCase_ )
_A = LightningModel(UpperCamelCase_ )
_A = torch.load(UpperCamelCase_ , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
_A = LongformerForQuestionAnswering.from_pretrained(UpperCamelCase_ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(UpperCamelCase_ )
print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
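# Example invocation (a sketch; the script filename and checkpoint path below are
# illustrative, and --longformer_model must be one of the two identifiers listed in
# the argparse help):
#
#   python convert_longformer_qa.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/longformer_qa.ckpt \
#       --pytorch_dump_folder_path ./converted_longformer_qa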
"""simple docstring"""
from collections.abc import Sequence
def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(UpperCamelCase_))
def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float:
'''simple docstring'''
__lowercase = 0.0
for coeff in reversed(UpperCamelCase_):
__lowercase = result * x + coeff
return result
if __name__ == "__main__":
_a = (0.0, 0.0, 5.0, 9.3, 7.0)
_a = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
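# Worked example for the values above: poly = (0.0, 0.0, 5.0, 9.3, 7.0) encodes
# f(x) = 5.0*x**2 + 9.3*x**3 + 7.0*x**4 (coefficients in increasing order of power),
# so f(10.0) = 500.0 + 9300.0 + 70000.0 = 79800.0 and both functions print 79800.0.
# Horner's method computes the equivalent nested form
# f(x) = c0 + x*(c1 + x*(c2 + x*(c3 + x*c4))), using one multiplication and one
# addition per coefficient instead of recomputing x**i for every term.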
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __A :
'''simple docstring'''
def __init__( self : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : int=[1, 1, 2] , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Any=8 , UpperCAmelCase_ : int=37 , UpperCAmelCase_ : Any="gelu_new" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Optional[int]=False , ) ->str:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = block_sizes
snake_case_ = num_decoder_layers
snake_case_ = d_model
snake_case_ = n_head
snake_case_ = d_head
snake_case_ = d_inner
snake_case_ = hidden_act
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = 2
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = initializer_std
# Used in the tests to check the size of the first attention layer
snake_case_ = n_head
# Used in the tests to check the size of the first hidden state
snake_case_ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
snake_case_ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
snake_case_ = self.num_hidden_layers + 2
def lowerCAmelCase ( self : Any ) ->Any:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ) ->Tuple:
"""simple docstring"""
snake_case_ = TFFunnelModel(config=UpperCAmelCase_ )
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = [input_ids, input_mask]
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
snake_case_ = False
snake_case_ = TFFunnelModel(config=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
snake_case_ = False
snake_case_ = TFFunnelModel(config=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , ) ->Dict:
"""simple docstring"""
snake_case_ = TFFunnelBaseModel(config=UpperCAmelCase_ )
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = [input_ids, input_mask]
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
snake_case_ = False
snake_case_ = TFFunnelBaseModel(config=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
snake_case_ = False
snake_case_ = TFFunnelBaseModel(config=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , ) ->int:
"""simple docstring"""
snake_case_ = TFFunnelForPreTraining(config=UpperCAmelCase_ )
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , ) ->str:
"""simple docstring"""
snake_case_ = TFFunnelForMaskedLM(config=UpperCAmelCase_ )
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , ) ->Any:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = TFFunnelForSequenceClassification(config=UpperCAmelCase_ )
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , ) ->Optional[int]:
"""simple docstring"""
snake_case_ = self.num_choices
snake_case_ = TFFunnelForMultipleChoice(config=UpperCAmelCase_ )
snake_case_ = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = TFFunnelForTokenClassification(config=UpperCAmelCase_ )
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , ) ->List[Any]:
"""simple docstring"""
snake_case_ = TFFunnelForQuestionAnswering(config=UpperCAmelCase_ )
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = config_and_inputs
snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Union[str, Any] = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
__lowercase: Optional[Any] = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowercase: List[str] = False
__lowercase: Optional[Any] = False
def lowerCAmelCase ( self : int ) ->int:
"""simple docstring"""
snake_case_ = TFFunnelModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : str ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
@require_tf
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Union[str, Any] = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
__lowercase: int = False
__lowercase: Dict = False
def lowerCAmelCase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = TFFunnelModelTester(self , base=UpperCAmelCase_ )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->int:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )
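# Note on the two test classes above: both share TFFunnelModelTester, but the second
# one constructs it with base=True so the shape checks target TFFunnelBaseModel (the
# encoder-only variant whose output sequence is pooled down to 2 or 3 positions, as
# asserted in create_and_check_base_model). To run just this suite (the file path
# follows the usual transformers test layout and is an assumption here):
#
#   pytest tests/models/funnel/test_modeling_tf_funnel.py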
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> Optional[Any]:
if attention_mask is None:
snake_case_ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case_ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case_ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=_SCREAMING_SNAKE_CASE )
if decoder_head_mask is None:
snake_case_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_SCREAMING_SNAKE_CASE )
if cross_attn_head_mask is None:
snake_case_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_SCREAMING_SNAKE_CASE )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class __A :
'''simple docstring'''
def __init__( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : int=99 , UpperCAmelCase_ : Union[str, Any]=16 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Optional[Any]="relu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : List[str]=20 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : Optional[Any]=0 , ) ->Dict:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = max_position_embeddings
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = bos_token_id
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = self.eos_token_id # Eos Token
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case_ = input_ids.clamp(self.pad_token_id + 1 )
snake_case_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case_ = self.get_config()
snake_case_ = prepare_mam_aaa_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return config, inputs_dict
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ , snake_case_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict ) ->Dict:
"""simple docstring"""
snake_case_ = MaMaaaModel(config=UpperCAmelCase_ ).get_decoder().to(UpperCAmelCase_ ).eval()
snake_case_ = inputs_dict["""input_ids"""]
snake_case_ = inputs_dict["""attention_mask"""]
snake_case_ = inputs_dict["""head_mask"""]
# first forward pass
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
snake_case_ , snake_case_ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )["""last_hidden_state"""]
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ )[
"""last_hidden_state"""
]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-2 ) )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ) ->int:
"""simple docstring"""
snake_case_ = MaMaaaModel(config=UpperCAmelCase_ ).to(UpperCAmelCase_ ).eval()
snake_case_ = model(**UpperCAmelCase_ )
snake_case_ = outputs.encoder_last_hidden_state
snake_case_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = model.get_encoder()
encoder.save_pretrained(UpperCAmelCase_ )
snake_case_ = MaMaaaEncoder.from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ )
snake_case_ = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = model.get_decoder()
decoder.save_pretrained(UpperCAmelCase_ )
snake_case_ = MaMaaaDecoder.from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ )
snake_case_ = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __A (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[Any] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
__lowercase: Union[str, Any] = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
__lowercase: Tuple = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
__lowercase: Dict = True
__lowercase: List[Any] = True
__lowercase: Union[str, Any] = False
__lowercase: Optional[int] = False
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ) ->str:
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
snake_case_ = MaMaaaModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_ )
snake_case_ , snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase_ )
def lowerCAmelCase ( self : str ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase_ )
def lowerCAmelCase ( self : str ) ->List[str]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = copy.deepcopy(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
if not self.is_encoder_decoder:
snake_case_ = inputs["""input_ids"""]
del inputs["input_ids"]
else:
snake_case_ = inputs["""input_ids"""]
snake_case_ = inputs.get("""decoder_input_ids""" , UpperCAmelCase_ )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , UpperCAmelCase_ )
snake_case_ = model.get_input_embeddings()
if not self.is_encoder_decoder:
snake_case_ = wte(UpperCAmelCase_ )
else:
snake_case_ = wte(UpperCAmelCase_ )
snake_case_ = wte(UpperCAmelCase_ )
with torch.no_grad():
model(**UpperCAmelCase_ )[0]
def lowerCAmelCase ( self : Any ) ->Any:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
snake_case_ = input_dict["""input_ids"""]
snake_case_ = input_ids.ne(1 ).to(UpperCAmelCase_ )
snake_case_ = MaMaaaForConditionalGeneration(UpperCAmelCase_ ).eval().to(UpperCAmelCase_ )
if torch_device == "cuda":
model.half()
model.generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
model.generate(num_beams=4 , do_sample=UpperCAmelCase_ , early_stopping=UpperCAmelCase_ , num_return_sequences=3 )
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Tuple = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __A (unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
snake_case_ = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCAmelCase_ )
snake_case_ = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
snake_case_ = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
snake_case_ = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase_ )[0]
snake_case_ = torch.Size((1, 11, 1_024) )
self.assertEqual(output.shape , UpperCAmelCase_ )
# change to expected output here
snake_case_ = torch.tensor(
[[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=UpperCAmelCase_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
snake_case_ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCAmelCase_ )
# change to intended input
snake_case_ = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
snake_case_ = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
snake_case_ = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase_ )[0]
snake_case_ = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase_ )
# change to expected output here
snake_case_ = torch.tensor(
[[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=UpperCAmelCase_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
snake_case_ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCAmelCase_ )
snake_case_ = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
snake_case_ = [
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
""" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
""" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
snake_case_ = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors="""pt""" )
snake_case_ = model.generate(
input_ids=dct["""input_ids"""].to(UpperCAmelCase_ ) , attention_mask=dct["""attention_mask"""].to(UpperCAmelCase_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
snake_case_ = [
"""The NSA case highlights the total absence of intelligence debate""",
"""I think there are two levels of response from the French government.""",
"""When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
""" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
""" communications in France.""",
]
snake_case_ = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
assert generated == expected_en
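# The integration tests above are decorated with @slow, so the test runner skips them
# unless the RUN_SLOW=1 environment variable is set; they download the real
# facebook/m2m100_418M checkpoint. A minimal standalone sketch of the same
# French-to-English generation path (mirrors test_seq_to_seq_generation):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
#   model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#   batch = tokenizer(["L'affaire NSA souligne l'absence totale de débat sur le renseignement"], return_tensors="pt")
#   generated = model.generate(**batch, num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"))
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))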
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (DDPMScheduler,)
def _lowercase ( self : Optional[int], **UpperCAmelCase__ : List[Any] ):
__lowercase = {
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**UpperCAmelCase__ )
return config
def _lowercase ( self : Tuple ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
def _lowercase ( self : Any ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase__, beta_end=UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__ )
def _lowercase ( self : int ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase__ )
def _lowercase ( self : str ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
self.check_over_configs(thresholding=UpperCAmelCase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase__, prediction_type=UpperCAmelCase__, sample_max_value=UpperCAmelCase__, )
def _lowercase ( self : Tuple ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__ )
def _lowercase ( self : Dict ):
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=UpperCAmelCase__ )
def _lowercase ( self : str ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**UpperCAmelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def _lowercase ( self : Union[str, Any] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**UpperCAmelCase__ )
__lowercase = len(UpperCAmelCase__ )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase__ ) ):
# 1. predict noise residual
__lowercase = model(UpperCAmelCase__, UpperCAmelCase__ )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, generator=UpperCAmelCase__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(UpperCAmelCase__ ) )
__lowercase = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def _lowercase ( self : List[str] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(prediction_type="v_prediction" )
__lowercase = scheduler_class(**UpperCAmelCase__ )
__lowercase = len(UpperCAmelCase__ )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase__ ) ):
# 1. predict noise residual
__lowercase = model(UpperCAmelCase__, UpperCAmelCase__ )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, generator=UpperCAmelCase__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(UpperCAmelCase__ ) )
__lowercase = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def _lowercase ( self : Optional[Any] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**UpperCAmelCase__ )
__lowercase = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
__lowercase = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase__ ):
if i == len(UpperCAmelCase__ ) - 1:
__lowercase = -1
else:
__lowercase = timesteps[i + 1]
__lowercase = scheduler.previous_timestep(UpperCAmelCase__ )
__lowercase = prev_t.item()
self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**UpperCAmelCase__ )
__lowercase = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(UpperCAmelCase__, msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
def _lowercase ( self : List[Any] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**UpperCAmelCase__ )
__lowercase = [1_0_0, 8_7, 5_0, 1, 0]
__lowercase = len(UpperCAmelCase__ )
with self.assertRaises(UpperCAmelCase__, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase__, timesteps=UpperCAmelCase__ )
def _lowercase ( self : Optional[int] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**UpperCAmelCase__ )
__lowercase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase__, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
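# For reference, the two full-loop tests above implement plain DDPM ancestral
# sampling: at each timestep the model predicts the noise residual and
# scheduler.step() converts it into the previous sample x_{t-1}. A minimal sketch of
# the same loop outside the test harness (`model` stands for any noise-prediction
# network and the tensor shape is illustrative):
#
#   scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample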
"""simple docstring"""
def _A ( UpperCamelCase_ : list[int]) -> float:
'''simple docstring'''
if not nums: # Makes sure that the list is not empty
raise ValueError("List is empty")
__lowercase = sum(UpperCamelCase_) / len(UpperCamelCase_) # Calculate the average
return sum(abs(x - average) for x in nums) / len(UpperCamelCase_)
if __name__ == "__main__":
import doctest
doctest.testmod()
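# Worked example: for nums = [1, 2, 3, 4] the average is 2.5, so the mean absolute
# deviation is (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.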
"""Fine-tune a Transformers model for named-entity recognition (CoNLL-2003-style token classification)."""

import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape

        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
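# Example invocation (a sketch; paths and the model identifier are illustrative, and
# the working directory must provide utils_ner.py plus a tasks.py defining the NER
# TokenClassificationTask subclass imported above):
#
#   python run_ner.py \
#       --data_dir ./data \
#       --labels ./data/labels.txt \
#       --model_name_or_path bert-base-multilingual-cased \
#       --output_dir ./ner-model \
#       --max_seq_length 128 \
#       --do_train --do_eval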
"""ALBERT import structure, exposed lazily via _LazyModule."""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
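# Lazy import structure: the submodule contents registered in _import_structure below
# are only imported when first accessed, via the _LazyModule assignment at the bottom
# of this file. This keeps `import transformers` cheap even though ALBERT ships
# PyTorch, TensorFlow, and Flax variants.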
_import_structure = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_albert"] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_albert"] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_albert"] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def _A ( lowercase ):
"""simple docstring"""
return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6''') )
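# Issuer prefixes accepted above: 34/37 (American Express), 35 (JCB), 4 (Visa),
# 5 (Mastercard), and 6 (e.g. Discover).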
def luhn_validation(credit_card_number: str) -> bool:
    """Validate the credit card number using the Luhn algorithm."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
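# Worked example: for 4111111111111111, doubling every second digit from the right
# turns 4,1,1,1,1,1,1,1 into 8,2,2,2,2,2,2,2 (sum 22); the eight untouched 1s add 8,
# giving a total of 30, and 30 % 10 == 0, so the Luhn check passes.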
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate the given credit card number, printing the reason on failure."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
lowerCamelCase_ : Optional[int] = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
lowerCamelCase_ : Optional[Any] = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
 p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
lowerCamelCase_ : Optional[int] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    """Pearson correlation coefficient metric, backed by scipy.stats.pearsonr."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3,
        # e.g. "refinenet1" becomes "fusion_stage.layers.3" since abs(1 - 4) == 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict;
        # the fused qkv weight has shape (3 * hidden_size, hidden_size), so q/k/v are equal thirds
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
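
# Example invocation (the script filename and local output path here are
# illustrative, not prescribed by the script itself):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large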
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
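# A minimal sketch of that expansion (illustrative only; the tool itself does the
# equivalent with itertools.product further below):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["--fp16 0", "--fp16 1", "--bf16 1"]]
#   variations = [" ".join(v) for v in itertools.product(*dims)]
#   # -> ['--tf32 0 --fp16 0', '--tf32 0 --fp16 1', ..., '--tf32 1 --bf16 1']
#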
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCamelCase : Optional[int] = float("nan")
class A:
'''simple docstring'''
def __init__( self : Optional[Any] , A_ : int ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = sys.stdout
lowerCamelCase_ = open(A_ , 'a' )
def __getattr__( self : List[Any] , A_ : Optional[int] ) -> str:
"""simple docstring"""
return getattr(self.stdout , A_ )
def a__ ( self : int , A_ : int ) -> List[str]:
"""simple docstring"""
self.stdout.write(A_ )
# strip tqdm codes
self.file.write(re.sub(r'^.*\r' , '' , A_ , 0 , re.M ) )
def _SCREAMING_SNAKE_CASE ( lowercase : str=80 , lowercase : Tuple=False ):
'''simple docstring'''
lowerCamelCase_ = []
# deal with critical env vars
lowerCamelCase_ = ['CUDA_VISIBLE_DEVICES']
for key in env_keys:
lowerCamelCase_ = os.environ.get(lowercase , lowercase )
if val is not None:
cmd.append(f"""{key}={val}""" )
# python executable (not always needed if the script is executable)
lowerCamelCase_ = sys.executable if full_python_path else sys.executable.split('/' )[-1]
cmd.append(lowercase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
lowerCamelCase_ = []
lowerCamelCase_ = ''
while len(lowercase ) > 0:
current_line += f"""{cmd.pop(0 )} """
if len(lowercase ) == 0 or len(lowercase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(lowercase )
lowerCamelCase_ = ''
return "\\\n".join(lowercase )
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable to debug everything but the run itself, to do it fast and see the progress
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """
        Empty init weights function to ensure compatibility of the class in the library.
        """
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
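
# A minimal usage sketch (illustrative, not part of the original module; it
# assumes timm is installed and that a timm model name such as "resnet18" is
# available locally):
#
#   import torch
#   from transformers import TimmBackboneConfig
#
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   backbone = TimmBackbone(config)
#   outputs = backbone(pixel_values=torch.randn(1, 3, 224, 224))
#   print([f.shape for f in outputs.feature_maps])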
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_pipeline(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : List[str] = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(_A )
lowercase_ : List[Any] = self.default_image_processor
lowercase_ : int = prepare_img()
lowercase_ : List[Any] = image_processor(_A ,return_tensors='pt' ).to(_A )
lowercase_ : Optional[int] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A ,(1, 3, 800, 1088) )
with torch.no_grad():
lowercase_ : List[Any] = model(**_A )
lowercase_ : Any = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,_A ,atol=_A ) )
lowercase_ : Tuple = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,_A ,atol=_A ) )
lowercase_ : Optional[int] = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,_A ,atol=_A ) )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(_A )
.eval()
)
lowercase_ : Optional[int] = self.default_image_processor
lowercase_ : int = prepare_img()
lowercase_ : Dict = image_processor(_A ,return_tensors='pt' ).to(_A )
lowercase_ : Optional[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A ,(1, 3, 800, 1088) )
with torch.no_grad():
lowercase_ : int = model(**_A )
# masks_queries_logits
lowercase_ : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
lowercase_ : Union[str, Any] = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
]
lowercase_ : Optional[int] = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_A ,atol=_A ) )
# class_queries_logits
lowercase_ : str = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowercase_ : Any = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_A ,atol=_A ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : int = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(_A )
.eval()
)
lowercase_ : List[str] = self.default_image_processor
lowercase_ : str = prepare_img()
lowercase_ : Dict = image_processor(_A ,return_tensors='pt' ).to(_A )
lowercase_ : Optional[int] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A ,(1, 3, 800, 1088) )
with torch.no_grad():
lowercase_ : str = model(**_A )
# masks_queries_logits
lowercase_ : Tuple = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
lowercase_ : Optional[int] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
lowercase_ : List[str] = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_A ,atol=_A ) )
# class_queries_logits
lowercase_ : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowercase_ : Tuple = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_A ,atol=_A ) )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors='pt', )
        inputs['pixel_values'] = inputs['pixel_values'].to(torch_device)
        inputs['mask_labels'] = [el.to(torch_device) for el in inputs['mask_labels']]
        inputs['class_labels'] = [el.to(torch_device) for el in inputs['class_labels']]
        with torch.no_grad():
            outputs = model(**inputs)
self.assertTrue(outputs.loss is not None )
| 364
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 321
| 0
|
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    # A Krishnamurthy (strong) number equals the sum of the factorials of its digits.
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.")
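    # Quick self-check (illustrative): 145 is a Krishnamurthy number because
    # 1! + 4! + 5! = 1 + 24 + 120 = 145, while 123 is not (1! + 2! + 3! = 9).
    assert krishnamurthy(145) and not krishnamurthy(123)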
| 233
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
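

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module): build a
    # default CANINE config and override one character-level hyperparameter.
    config = CanineConfig(downsampling_rate=2)
    print(config.downsampling_rate, config.num_hash_buckets)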
| 233
| 1
|
from __future__ import annotations
def peak(lst: list[int]) -> int:
    # Divide-and-conquer search for the peak of a unimodal list in O(log n).
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if the middle element is the peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if still increasing, recurse on the right half
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing: recurse on the left half
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
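    # Example (illustrative): in the unimodal list below the peak element is 5.
    assert peak([1, 2, 3, 5, 4, 0]) == 5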
| 10
|
def hamming_distance(string1: str, string2: str) -> int:
    # Count the positions at which two equal-length strings differ.
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
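    # Example (illustrative): "karolin" and "kathrin" differ in exactly 3 positions.
    assert hamming_distance("karolin", "kathrin") == 3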
| 10
| 1
|
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    # Return x unchanged if it is already iterable, otherwise duplicate it into a pair.
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class __snake_case :
"""simple docstring"""
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
pass
def UpperCamelCase__( self ):
'''simple docstring'''
pass
def UpperCamelCase__( self ):
'''simple docstring'''
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, F"""Difference between torch and flax is {diff} (>= {tol}).""")
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , **__lowerCamelCase ):
'''simple docstring'''
__A : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(__lowerCamelCase , __lowerCamelCase )
__A : List[Any] = FlaxVisionTextDualEncoderModel(__lowerCamelCase )
__A : int = model(input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , **__lowerCamelCase ):
'''simple docstring'''
__A , __A : List[str] = self.get_vision_text_model(__lowerCamelCase , __lowerCamelCase )
__A : List[str] = {'''vision_model''': vision_model, '''text_model''': text_model}
__A : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowerCamelCase )
__A : Dict = model(input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , **__lowerCamelCase ):
'''simple docstring'''
__A , __A : List[Any] = self.get_vision_text_model(__lowerCamelCase , __lowerCamelCase )
__A : int = {'''vision_model''': vision_model, '''text_model''': text_model}
__A : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowerCamelCase )
__A : Any = model(input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase )
__A : Optional[int] = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase )
__A : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(__lowerCamelCase )
__A : Tuple = model(input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase )
__A : Any = after_output[0]
__A : Dict = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCamelCase , 1e-3 )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , **__lowerCamelCase ):
'''simple docstring'''
__A , __A : Any = self.get_vision_text_model(__lowerCamelCase , __lowerCamelCase )
__A : Optional[int] = {'''vision_model''': vision_model, '''text_model''': text_model}
__A : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowerCamelCase )
__A : Any = model(
input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase , output_attentions=__lowerCamelCase )
__A : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(__lowerCamelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__A : Optional[Any] = to_atuple(vision_model.config.image_size )
__A : Any = to_atuple(vision_model.config.patch_size )
__A : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__A : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__A : str = output.text_model_output.attentions
self.assertEqual(len(__lowerCamelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
pt_model.to(__lowerCamelCase )
pt_model.eval()
# prepare inputs
__A : Dict = inputs_dict
__A : str = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__A : Optional[Any] = pt_model(**__lowerCamelCase ).to_tuple()
__A : int = fx_model(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__lowerCamelCase , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__lowerCamelCase )
__A : int = FlaxVisionTextDualEncoderModel.from_pretrained(__lowerCamelCase , from_pt=__lowerCamelCase )
__A : List[Any] = fx_model_loaded(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__lowerCamelCase , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__lowerCamelCase )
__A : List[str] = VisionTextDualEncoderModel.from_pretrained(__lowerCamelCase , from_flax=__lowerCamelCase )
pt_model_loaded.to(__lowerCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
__A : Optional[int] = pt_model_loaded(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__lowerCamelCase , pt_output_loaded.numpy() , 4e-2 )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__lowerCamelCase , __lowerCamelCase )
__A : Optional[Any] = VisionTextDualEncoderModel(__lowerCamelCase )
__A : Any = FlaxVisionTextDualEncoderModel(__lowerCamelCase )
__A : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __lowerCamelCase )
__A : Optional[Any] = fx_state
self.check_pt_flax_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__lowerCamelCase , __lowerCamelCase )
__A : Any = VisionTextDualEncoderModel(__lowerCamelCase )
__A : List[Any] = FlaxVisionTextDualEncoderModel(__lowerCamelCase )
__A : Tuple = load_flax_weights_in_pytorch_model(__lowerCamelCase , fx_model.params )
self.check_pt_flax_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Union[str, Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[Any] = self.prepare_config_and_inputs()
self.check_save_load(**__lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__lowerCamelCase )
@is_pt_flax_cross_test
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[int] = self.prepare_config_and_inputs()
__A : Optional[Any] = config_inputs_dict.pop('''vision_config''' )
__A : Tuple = config_inputs_dict.pop('''text_config''' )
__A : List[Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
self.check_equivalence_flax_to_pt(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@slow
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A : List[Any] = self.get_pretrained_model_and_inputs()
__A : str = model_a(**__lowerCamelCase )
__A : Optional[int] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__lowerCamelCase )
__A : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(__lowerCamelCase )
__A : str = model_a(**__lowerCamelCase )
__A : Tuple = after_outputs[0]
__A : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCamelCase , 1e-5 )
@require_flax
class __snake_case ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=__lowerCamelCase , text_from_pt=__lowerCamelCase , )
__A : Union[str, Any] = 13
__A : Optional[int] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__A : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__A : Optional[Any] = random_attention_mask([batch_size, 4] )
__A : Optional[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Dict = FlaxViTModel(__lowerCamelCase )
__A : str = FlaxBertModel(__lowerCamelCase )
return vision_model, text_model
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Dict = FlaxViTModelTester(self )
__A : Optional[int] = FlaxBertModelTester(self )
__A : Any = vit_model_tester.prepare_config_and_inputs()
__A : List[Any] = bert_model_tester.prepare_config_and_inputs()
__A , __A : int = vision_config_and_inputs
__A , __A , __A , __A : Tuple = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __snake_case ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=__lowerCamelCase , text_from_pt=__lowerCamelCase , )
__A : Tuple = 13
__A : Dict = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__A : Union[str, Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__A : Any = random_attention_mask([batch_size, 4] )
__A : Dict = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = FlaxCLIPVisionModel(__lowerCamelCase )
__A : Dict = FlaxBertModel(__lowerCamelCase )
return vision_model, text_model
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Union[str, Any] = FlaxCLIPVisionModelTester(self )
__A : str = FlaxBertModelTester(self )
__A : Union[str, Any] = clip_model_tester.prepare_config_and_inputs()
__A : Tuple = bert_model_tester.prepare_config_and_inputs()
__A , __A : Optional[int] = vision_config_and_inputs
__A , __A , __A , __A : List[str] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__( self ):
'''simple docstring'''
__A : str = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
__A : str = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
__A : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__A : Optional[int] = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=__lowerCamelCase , padding=__lowerCamelCase , return_tensors='''np''' )
__A : Optional[Any] = model(**__lowerCamelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__A : Optional[Any] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image , __lowerCamelCase , atol=1e-3 ) )
| 179
|
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with extra hyperparameters for movement pruning."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
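

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module): the extra
    # pruning hyperparameters ride alongside the standard BERT ones.
    config = MaskedBertConfig(pruning_method="topK", mask_scale=0.5)
    print(config.pruning_method, config.mask_init, config.mask_scale)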
| 179
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __magic_name__ (unittest.TestCase ):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
def __a ( self ) -> Tuple:
lowerCAmelCase_ = "sshleifer/tiny-gpt2"
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
lowerCAmelCase_ = PyTorchBenchmark(_a )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = "sgugger/tiny-distilbert-classification"
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , )
lowerCAmelCase_ = PyTorchBenchmark(_a )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = "sshleifer/tiny-gpt2"
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
lowerCAmelCase_ = PyTorchBenchmark(_a )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def __a ( self ) -> Tuple:
lowerCAmelCase_ = "sshleifer/tiny-gpt2"
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
lowerCAmelCase_ = PyTorchBenchmark(_a )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = "sshleifer/tiny-gpt2"
lowerCAmelCase_ = AutoConfig.from_pretrained(_a )
# set architectures equal to `None`
lowerCAmelCase_ = None
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
lowerCAmelCase_ = PyTorchBenchmark(_a , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = "sshleifer/tiny-gpt2"
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
lowerCAmelCase_ = PyTorchBenchmark(_a )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = "sshleifer/tiny-gpt2"
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , )
lowerCAmelCase_ = PyTorchBenchmark(_a )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = "sshleifer/tiny-gpt2"
lowerCAmelCase_ = AutoConfig.from_pretrained(_a )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
lowerCAmelCase_ = PyTorchBenchmark(_a , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> Dict:
lowerCAmelCase_ = "sshleifer/tinier_bart"
lowerCAmelCase_ = AutoConfig.from_pretrained(_a )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
lowerCAmelCase_ = PyTorchBenchmark(_a , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = "sshleifer/tiny-gpt2"
lowerCAmelCase_ = AutoConfig.from_pretrained(_a )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
lowerCAmelCase_ = PyTorchBenchmark(_a , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __a ( self ) -> int:
lowerCAmelCase_ = "sshleifer/tinier_bart"
lowerCAmelCase_ = AutoConfig.from_pretrained(_a )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
lowerCAmelCase_ = PyTorchBenchmark(_a , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , "inf_time.csv" ) , train_memory_csv_file=os.path.join(_a , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(_a , "inf_mem.csv" ) , train_time_csv_file=os.path.join(_a , "train_time.csv" ) , env_info_csv_file=os.path.join(_a , "env.csv" ) , multi_process=_a , )
lowerCAmelCase_ = PyTorchBenchmark(_a )
benchmark.run()
self.assertTrue(Path(os.path.join(_a , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_a , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_a , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_a , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_a , "env.csv" ) ).exists() )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(_a ):
self.assertTrue(hasattr(_a , "sequential" ) )
self.assertTrue(hasattr(_a , "cumulative" ) )
self.assertTrue(hasattr(_a , "current" ) )
self.assertTrue(hasattr(_a , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , "log.txt" ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , )
lowerCAmelCase_ = PyTorchBenchmark(_a )
lowerCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_a , "log.txt" ) ).exists() )
| 353
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
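

# Minimal usage sketch (illustrative; "CIDAS/clipseg-rd64-refined" is an assumed
# checkpoint name, shown only to make the call shapes concrete):
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   encoding = processor(text=["a cat"], images=[pil_image], return_tensors="pt")
#   # -> input_ids / attention_mask from the tokenizer plus pixel_values;
#   #    passing visual_prompt instead yields conditional_pixel_values.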
| 22
| 0
|
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'num_attention_heads': 2,
            'attention_head_dim': 4,
            'num_layers': 2,
            'embedding_dim': 8,
            'num_embeddings': 7,
            'additional_embeddings': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def _lowercase ( self : Union[str, Any] ) -> Any:
__lowerCamelCase ,__lowerCamelCase : Union[str, Any] = PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=_a )
self.assertIsNotNone(_a )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(_a )
__lowerCamelCase : Any = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _lowercase ( self : Union[str, Any] ) -> Any:
__lowerCamelCase ,__lowerCamelCase : Optional[Any] = self.prepare_init_args_and_inputs_for_common()
__lowerCamelCase : Any = self.model_class(**_a )
__lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Optional[int] = [*signature.parameters.keys()]
__lowerCamelCase : Optional[int] = ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , _a )
def _lowercase ( self : Tuple ) -> List[Any]:
__lowerCamelCase : str = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
__lowerCamelCase : Tuple = model.to(_a )
if hasattr(_a , 'set_default_attn_processor' ):
model.set_default_attn_processor()
__lowerCamelCase : List[Any] = self.get_dummy_seed_input()
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(**_a )[0]
__lowerCamelCase : Optional[int] = output[0, :5].flatten().cpu()
print(_a )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
__lowerCamelCase : int = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(_a , _a , rtol=1e-2 ) )
@slow
class lowerCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Any , _a : Optional[Any]=1 , _a : Dict=768 , _a : List[Any]=77 , _a : List[str]=0 ) -> int:
torch.manual_seed(_a )
__lowerCamelCase : int = batch_size
__lowerCamelCase : Optional[Any] = embedding_dim
__lowerCamelCase : Any = num_embeddings
__lowerCamelCase : Union[str, Any] = torch.randn((batch_size, embedding_dim) ).to(_a )
__lowerCamelCase : int = torch.randn((batch_size, embedding_dim) ).to(_a )
__lowerCamelCase : Tuple = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_a )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowercase ( self : Optional[int] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def _lowercase ( self : Optional[int] , _a : Union[str, Any] , _a : Any ) -> List[str]:
__lowerCamelCase : Optional[Any] = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(_a )
__lowerCamelCase : List[str] = self.get_dummy_seed_input(seed=_a )
with torch.no_grad():
__lowerCamelCase : List[Any] = model(**_a )[0]
assert list(sample.shape ) == [1, 768]
__lowerCamelCase : Tuple = sample[0, :8].flatten().cpu()
print(_a )
__lowerCamelCase : Dict = torch.tensor(_a )
assert torch_all_close(_a , _a , atol=1e-3 )
| 208
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    # Direct evaluation: sum coefficient_i * x**i, coefficients given lowest degree first.
    return sum(coeff * (x**i) for i, coeff in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    # Horner's rule: fold from the highest-degree coefficient down,
    # one multiply and one add per coefficient.
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
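    # Cross-check (illustrative): both strategies evaluate the same polynomial
    # (5x^2 + 9.3x^3 + 7x^4 at x = 10 gives 79800), up to float rounding.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6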
| 208
| 1
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class __lowercase :
'''simple docstring'''
a : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowercase :
'''simple docstring'''
a : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
a : str = field(metadata={"help": "Should contain the data files for the task."} )
a : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info('''  %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(result )
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 217
|
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    queue: list = []
    # for each node and its adjacency list, add them and the rank of the node to the queue;
    # heapq works with a min priority queue, so -1 * len(v) turns it into a max-priority queue
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still uncovered edges
    # (queue[0][0] is the negated rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # remove all edges adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
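
    # Illustrative check on a fresh graph (the routine above mutates its input's
    # adjacency lists, so we don't reuse `graph`): covering a triangle requires
    # at least two of its three vertices.
    triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
    assert len(greedy_min_vertex_cover(triangle)) >= 2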
| 217
| 1
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class _snake_case ( unittest.TestCase ):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = '''src/transformers'''
        shutil.rmtree(self.transformer_dir )
    def check_copy_consistency(self , comment , class_name , class_code , overwrite_result=None ):
        code = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.transformer_dir , '''new_code.py''' )
        with open(fname , '''w''' , newline='''\n''' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , '''r''' ) as f:
                self.assertTrue(f.read() , expected )

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
        self.assertEqual(code , REFERENCE_CODE )
def SCREAMING_SNAKE_CASE__ ( self ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , _lowerCamelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , _lowerCamelCase ) , )
# Copy consistency with a really long name
a :str = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub('''Bert''' , _lowerCamelCase , _lowerCamelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , _lowerCamelCase , overwrite_result=re.sub('''Bert''' , '''TestModel''' , _lowerCamelCase ) , )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
a :List[str] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
a :Union[str, Any] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
a :List[str] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
a , a :Optional[Any] = check_copies.convert_to_localized_md(
_lowerCamelCase , _lowerCamelCase , localized_readme['''format_model_list'''] )
self.assertFalse(_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
a , a :Optional[int] = check_copies.convert_to_localized_md(
_lowerCamelCase , _lowerCamelCase , localized_readme['''format_model_list'''] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(_lowerCamelCase )
a :str = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
a :Dict = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
a :str = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 94
|
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # Malus's law: transmitted intensity I = I0 * cos^2(theta)
    if initial_intensity < 0:
        # handling of negative values of initial intensity
        raise ValueError("The value of intensity cannot be negative")
    if angle < 0 or angle > 360:
        # handling of values out of the allowed range
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
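For reference, a quick sanity check of `malus_law` as defined above; the numbers are illustrative and not part of the original script:

    # Illustrative checks of Malus's law: I = I0 * cos^2(theta).
    print(malus_law(100.0, 0))   # 100.0 -- polarizer aligned, full transmission
    print(malus_law(100.0, 45))  # ~50.0 -- cos(45 deg)^2 == 0.5
    print(malus_law(100.0, 90))  # ~0.0  -- crossed polarizers (up to float error)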
| 321
| 0
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
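A minimal usage sketch for the config class above; the specific values are illustrative assumptions, not defaults taken from any released checkpoint:

    # Sketch: instantiate the config and inspect the derived sizes.
    config = TimeSeriesTransformerConfig(
        prediction_length=24,
        context_length=48,
        num_time_features=2,
        num_static_categorical_features=1,
        cardinality=[10],
    )
    print(config.context_length)       # 48
    print(config.embedding_dimension)  # [5] == [min(50, (10 + 1) // 2)]
    print(config.feature_size)         # 16 == 1 * len(lags_sequence) + (5 + 0 + 2 + 0 + 1 * 2)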
| 60
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
    main(args)
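To read the shards back, the serialized features can be parsed with `tf.data`. A minimal sketch, assuming a shard written with the default `--output_dir` and `--split` (the exact filename below is hypothetical):

    # Sketch: decode one record from a shard produced by the script above.
    import tensorflow as tf

    feature_description = {
        "input_ids": tf.io.VarLenFeature(tf.int64),
        "attention_mask": tf.io.VarLenFeature(tf.int64),
    }

    raw_ds = tf.data.TFRecordDataset("tf-tpu/train/dataset-0-1000.tfrecord")  # hypothetical shard name
    for raw_record in raw_ds.take(1):
        parsed = tf.io.parse_single_example(raw_record, feature_description)
        print(tf.sparse.to_dense(parsed["input_ids"]).shape)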
| 60
| 1
|
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return the peak value of `lst`, assuming it strictly increases and then
    strictly decreases, using divide and conquer on the middle element.

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
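For reference, a direct call on a bitonic list; the input shape is the algorithm's precondition, as noted in the docstring:

    # peak() assumes a bitonic list: strictly increasing, then strictly decreasing.
    print(peak([1, 3, 7, 12, 9, 4, 2]))  # -> 12, found in O(log n) comparisons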
| 10
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of `func` (an expression in the variable x, given as a string)
    by the Newton-Raphson method, starting from the initial guess `a`."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find Square Root of 5
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
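Because `eval` on arbitrary strings is unsafe, an alternative is to compile the expression once with sympy's `lambdify`. This is a separate sketch, not the implementation above; `newton_raphson_safe` is a hypothetical name:

    # Sketch: an eval-free Newton-Raphson variant using sympy.lambdify.
    from sympy import diff, lambdify, symbols, sympify

    def newton_raphson_safe(func_str: str, a: float, precision: float = 1e-10) -> float:
        x = symbols("x")
        expr = sympify(func_str)       # parse the expression once
        f = lambdify(x, expr)          # compile f(x)
        f_prime = lambdify(x, diff(expr, x))  # compile f'(x)
        guess = a
        while abs(f(guess)) >= precision:
            guess = guess - f(guess) / f_prime(guess)
        return float(guess)

    print(newton_raphson_safe("x**2 - 5*x + 2", 0.4))  # smaller root, ~0.438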
| 10
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
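Beyond the processor plumbing tested above, OwlViT is typically used for zero-shot object detection. A sketch of that end-to-end flow, using the same checkpoint as the tests; the COCO image URL and queries are illustrative:

    # Sketch: zero-shot detection with the processor exercised above.
    import requests
    import torch
    from PIL import Image
    from transformers import OwlViTForObjectDetection, OwlViTProcessor

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
    results = processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=target_sizes)
    print(results[0]["scores"].shape, results[0]["labels"].shape, results[0]["boxes"].shape)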
| 201
|
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
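A quick non-interactive round-trip using the functions above; the keyword and message are illustrative:

    # The keyword cipher is a bijection on A-Z, so decipher inverts encipher.
    cipher_map = create_cipher_map("CIPHER")
    secret = encipher("HELLO WORLD", cipher_map)
    print(secret)  # spaces pass through unchanged
    assert decipher(secret, cipher_map) == "HELLO WORLD"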
| 201
| 1
|
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with little data, distributed training needs more epochs to reach the same score
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
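Every test above relies on the same trick: temporarily replacing `sys.argv` so a script's `main()` parses a synthetic command line. A standalone sketch of the pattern; the script name and flags are hypothetical:

    # Minimal sketch of the sys.argv patching pattern used above.
    import sys
    from unittest.mock import patch

    def main():
        print("argv seen by main():", sys.argv[1:])

    testargs = ["my_script.py", "--epochs", "2", "--output_dir", "/tmp/out"]
    with patch.object(sys, "argv", testargs):
        main()  # sees the synthetic argv instead of the real command line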
| 269
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 22
| 0
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
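Distilled from the slow test above, this is roughly the end-to-end img2img-with-ControlNet usage; the checkpoints and image URLs come from the test itself, everything else is a sketch:

    # Sketch: ControlNet-guided img2img inference, mirroring the slow test.
    import torch
    from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
    from diffusers.utils import load_image

    controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
    pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
    )
    pipe.enable_model_cpu_offload()

    image = load_image(
        "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
    ).resize((512, 512))
    control_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
    ).resize((512, 512))

    generator = torch.Generator(device="cpu").manual_seed(0)
    output = pipe(
        "evil space-punk bird",
        image,
        control_image=control_image,
        generator=generator,
        num_inference_steps=50,
        strength=0.6,
    )
    print(output.images[0].size)  # PIL image by default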
| 356
|
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    # Rename the LM head key so the weights match the transformers GPT-2 layout.
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
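After conversion, the renamed state dict can be loaded into a GPT-2-style model. A sketch, under the assumption that the official `microsoft/DialoGPT-small` config matches the converted weights:

    # Sketch: loading a converted DialoGPT checkpoint (assumed config compatibility).
    from transformers import AutoConfig, GPT2LMHeadModel

    config = AutoConfig.from_pretrained("microsoft/DialoGPT-small")
    model = GPT2LMHeadModel.from_pretrained("./DialoGPT-small", config=config)
    model.eval()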
| 309
| 0
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece Unigram tokenizer with NMT, NFKC, whitespace and lowercasing normalization."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
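A minimal usage sketch for the tokenizer class above; the in-memory corpus is illustrative, and on such a tiny corpus the trainer may produce fewer pieces than requested:

    # Sketch: train the Unigram tokenizer above on a tiny in-memory corpus.
    tokenizer = SentencePieceUnigramTokenizer()
    corpus = ["hello world", "sentencepiece unigram models are trained with EM"]
    tokenizer.train_from_iterator(corpus, vocab_size=100)
    print(tokenizer.encode("hello world").tokens)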
| 217
|
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 217
| 1
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
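# Reading of the asserted shape above (an informal note, not part of the original
# test): with the usual 224x224 input and 16x16 patches, the vision branch
# contributes 14 * 14 + 1 = 197 tokens, plus the 2 text tokens fed in, which is
# where the (1, 199, 768) hidden-state shape comes from.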
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
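# Illustrative use of the class above (the checkpoint name is only an example):
#
#     extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#
# The checkpoint's config resolves to model_type "wav2vec2", which the mapping at
# the top of this file routes to Wav2Vec2FeatureExtractor.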
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"

        ids_1 = tokenizer.encode(prefix_text + input_text)
        ids_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        ids_3 = tokenizer.encode(input_text, prefix_text=prefix_text)

        text_1 = tokenizer.decode(ids_1)
        text_2 = tokenizer.decode(ids_2)
        text_3 = tokenizer.decode(ids_3)

        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)
    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2

        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)

        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")

        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token
    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
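# Context for the tests above (informal note): GPT-SAN is a prefix-LM, so the
# tokenizer emits token_type_ids that mark the bidirectional prefix segment with
# 1 and the autoregressive continuation with 0 -- exactly the property that
# test_token_type_ids asserts against the hand-computed masks.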
def merge_sort(collection: list) -> list:
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
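# Quick sanity checks for merge_sort (added for illustration; they run on import
# and mirror what doctest examples for this function would cover):
assert merge_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert merge_sort([]) == []
assert merge_sort([-2, -5, -45]) == [-45, -5, -2]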
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=''',''')
class Graph:
    def __init__(self):
        self.vertex = {}

    # for printing the graph's adjacency list
    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    # for adding the edge between two vertices
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
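# For comparison, a non-recursive DFS over the same kind of adjacency dict,
# using an explicit stack (an illustrative sketch, not part of the original
# file):
def dfs_iterative(graph: dict, start: int) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.add(vertex)
            order.append(vertex)
            # push neighbours in reverse so they are popped in insertion order
            stack.extend(reversed(graph.get(vertex, [])))
    return order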
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
A : Union[str, Any] = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
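# Usage note (illustrative): this helper is what the subfolder tests below rely
# on, e.g.
#
#     assert check_models_equal(model, FlaxBertModel.from_pretrained(tmp_dir, subfolder="bert"))
#
# It compares flattened parameter leaves with an absolute tolerance of 1e-4.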
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path: str):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
UpperCAmelCase_ = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
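# Example invocation (paths are placeholders, not real checkpoints):
#
#     python convert_pix2struct_checkpoint.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --pytorch_dump_folder_path ./pix2struct-base \
#         --use_large        # only when converting the large variant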
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
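# Example invocation for a fine-tuned phoneme-CTC checkpoint (paths are
# placeholders):
#
#     python convert_unispeech_checkpoint.py \
#         --checkpoint_path ./unispeech.pt \
#         --dict_path ./dict.ltr.txt \
#         --pytorch_dump_folder_path ./unispeech-hf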
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
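# Each returned triple is (filled_sentence, probability, predicted_token); with
# topk=3 the script prints three such candidates for the <mask> slot (an
# informal description of the return shape, derived from fill_mask above).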
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
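# Sketch of typical usage of the processor above (variable names illustrative):
#
#     processor = MCTCTProcessor(feature_extractor=extractor, tokenizer=tok)
#     batch = processor(audio=waveform, sampling_rate=16_000, text=transcript)
#     # batch holds the feature extractor's acoustic features plus "labels"
#     # taken from the tokenizer's input_ids, matching __call__ above.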
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
UpperCamelCase_ = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
UpperCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
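# Added sketch: the loop in `stretch` builds the classic histogram-equalization
# lookup table, mapping grey level g to round((L - 1) * CDF(g)). The same table in
# a few lines of numpy (illustrative helper, not part of the original module):
def equalization_lut(image):
    hist = np.bincount(image.ravel(), minlength=256)  # per-level pixel counts
    cdf = np.cumsum(hist) / hist.sum()                # cumulative distribution
    return np.rint(255 * cdf).astype(np.uint8)        # apply with lut[image]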
| 309
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
_lowerCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(_lowerCamelCase ):
return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : str = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
_lowerCamelCase : List[Any] = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
_lowerCamelCase : Optional[Any] = PipelineDataFormat.from_str(
format=_lowerCamelCase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(_lowerCamelCase , _lowerCamelCase )
class A_ ( _a ):
def __init__( self: Any ,__lowerCAmelCase: Pipeline ,__lowerCAmelCase: PipelineDataFormat ):
'''simple docstring'''
_lowerCamelCase : List[str] = nlp
_lowerCamelCase : str = reader
@staticmethod
def _lowercase ( __lowerCAmelCase: ArgumentParser ):
'''simple docstring'''
_lowerCamelCase : List[str] = parser.add_parser("run" ,help="Run a pipeline through the CLI" )
run_parser.add_argument("--task" ,choices=get_supported_tasks() ,help="Task to run" )
run_parser.add_argument("--input" ,type=__lowerCAmelCase ,help="Path to the file to use for inference" )
run_parser.add_argument("--output" ,type=__lowerCAmelCase ,help="Path to the file that will be used post to write results." )
run_parser.add_argument("--model" ,type=__lowerCAmelCase ,help="Name or path to the model to instantiate." )
run_parser.add_argument("--config" ,type=__lowerCAmelCase ,help="Name or path to the model's config to instantiate." )
run_parser.add_argument(
"--tokenizer" ,type=__lowerCAmelCase ,help="Name of the tokenizer to use. (default: same as the model name)" )
run_parser.add_argument(
"--column" ,type=__lowerCAmelCase ,help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" ,)
run_parser.add_argument(
"--format" ,type=__lowerCAmelCase ,default="infer" ,choices=PipelineDataFormat.SUPPORTED_FORMATS ,help="Input format to read from" ,)
run_parser.add_argument(
"--device" ,type=__lowerCAmelCase ,default=-1 ,help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" ,)
run_parser.add_argument("--overwrite" ,action="store_true" ,help="Allow overwriting the output file." )
run_parser.set_defaults(func=__lowerCAmelCase )
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self._nlp, []
for entry in self._reader:
_lowerCamelCase : List[str] = nlp(**__lowerCAmelCase ) if self._reader.is_multi_columns else nlp(__lowerCAmelCase )
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
outputs.append(__lowerCAmelCase )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
_lowerCamelCase : str = self._reader.save_binary(__lowerCAmelCase )
logger.warning(F"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(__lowerCAmelCase )
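# Example invocation (added, illustrative — the flags mirror the parser above;
# file names and the task are assumptions):
#
#   transformers-cli run --task sentiment-analysis \
#       --input reviews.csv --column text --format csv \
#       --output predictions.csv --device -1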
| 340
|
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class A_ ( _a ):
lowerCAmelCase__ = 'char'
lowerCAmelCase__ = 'bpe'
lowerCAmelCase__ = 'wp'
_lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A_ ( _a ):
lowerCAmelCase__ = ['image_processor', 'char_tokenizer']
lowerCAmelCase__ = 'ViTImageProcessor'
lowerCAmelCase__ = 'MgpstrTokenizer'
def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." ,__lowerCAmelCase ,)
_lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" )
_lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" )
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__lowerCAmelCase ,__lowerCAmelCase )
def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
_lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
if text is not None:
_lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_lowerCamelCase : Tuple = encodings["input_ids"]
return inputs
def _lowercase ( self: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences
_lowerCamelCase : Dict = char_preds.size(0 )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" )
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" )
_lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" )
_lowerCamelCase : List[str] = []
_lowerCamelCase : str = []
for i in range(__lowerCAmelCase ):
_lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
_lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]]
_lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = final_strs
_lowerCamelCase : int = final_scores
_lowerCamelCase : str = char_strs
_lowerCamelCase : Dict = bpe_strs
_lowerCamelCase : int = wp_strs
return out
def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
_lowerCamelCase : int = self.char_decode
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Optional[int] = "[s]"
elif format == DecodeType.BPE:
_lowerCamelCase : Dict = self.bpe_decode
_lowerCamelCase : str = 2
_lowerCamelCase : Union[str, Any] = "#"
elif format == DecodeType.WORDPIECE:
_lowerCamelCase : int = self.wp_decode
_lowerCamelCase : List[str] = 102
_lowerCamelCase : List[Any] = "[SEP]"
else:
raise ValueError(F"""Format {format} is not supported.""" )
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], []
_lowerCamelCase : Any = pred_logits.size(0 )
_lowerCamelCase : int = pred_logits.size(1 )
_lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:]
_lowerCamelCase : List[str] = decoder(__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 )
_lowerCamelCase : Any = preds_max_prob[:, 1:]
for index in range(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = preds_str[index][:pred_eos]
_lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist()
_lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1
_lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1]
_lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__lowerCAmelCase )
conf_scores.append(__lowerCAmelCase )
return dec_strs, conf_scores
def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )]
return decode_strs
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__lowerCAmelCase )
def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )]
return decode_strs
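# Usage sketch (added, illustrative): end-to-end scene-text recognition with the
# processor above. The checkpoint is the published MGP-STR base model; treat exact
# class names as assumptions if your transformers version differs.
#
#   from PIL import Image
#   from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=Image.open("word.png").convert("RGB"),
#                            return_tensors="pt").pixel_values
#   logits = model(pixel_values).logits  # tuple of (char, bpe, wp) logits
#   print(processor.batch_decode(logits)["generated_text"])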
| 340
| 1
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
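# Usage sketch (added, illustrative): this builder is what `load_dataset("json", ...)`
# dispatches to. The file paths below are assumptions.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("json", data_files="records.jsonl", split="train")         # one object per line
#   nested = load_dataset("json", data_files="dump.json", field="data", split="train")  # objects under a field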
| 26
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"


@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for ShapEImg2ImgPipeline."""

    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        """Returns the device on which the pipeline's models will be executed."""
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, List) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator=None,
        latents=None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: str = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                "`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or"
                f" `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 26
| 1
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    """Load each TensorFlow variable, rename it to the transformers layout, and save the result."""
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                # ignore optimizer state
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
_UpperCamelCase = parser.parse_args()
convert_tf_gptsan_to_pt(args)
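# Example invocation (added, illustrative — the script name and paths are assumptions):
#
#   python convert_tf_gptsan_to_pt.py \
#       --tf_model_dir ./gptsan_tf_checkpoint --output ./gptsan_pytorch_model.pt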
| 368
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_UpperCamelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
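# Example invocation (added, illustrative — paths are assumptions; pass
# --not_finetuned for a pre-training checkpoint without a CTC head):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path ./unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf --not_finetuned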
| 234
| 0
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 305
|
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 305
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
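# Usage sketch (added, illustrative): configs are plain containers, so a randomly
# initialised LeViT can be built directly from them. `LevitModel` is assumed to be
# available in your transformers version.
#
#   from transformers import LevitConfig, LevitModel
#
#   config = LevitConfig(hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12])
#   model = LevitModel(config)  # random weights, useful for architecture experiments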
| 370
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 313
| 0
|
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLNet model."""

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
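# Usage sketch (added, illustrative): the d_model/n_head consistency check above in action.
#
#   config = XLNetConfig(d_model=1024, n_head=16)  # fine: d_head becomes 64
#   config = XLNetConfig(d_model=1000, n_head=16)  # raises ValueError (1000 % 16 != 0)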
| 260
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Returns the list of all the possible combinations a string can be constructed
    from the given list of substrings."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
| 228
| 0
|
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
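
# Worked example (computed by hand against the algorithm above):
#   text_justification("This is an example of text justification.", 16)
#   -> ["This    is    an", "example  of text", "justification.  "]
# Extra spaces are distributed round-robin starting from the left gaps; the
# final line is left-justified and padded out to max_width.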
if __name__ == "__main__":
from doctest import testmod
testmod()
| 183
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 183
| 1
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format, output_path=args.output, input_path=args.input, column=args.column if args.column else nlp.default_input_names, overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column", type=str, help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format", type=str, default="infer", choices=PipelineDataFormat.SUPPORTED_FORMATS, help="Input format to read from",
        )
        run_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)
    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 340
|
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
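
# Why the loop bounds work (explanatory note, not from the source): Euclid's
# formula generates every primitive Pythagorean triple as
# (m^2 - n^2, 2mn, m^2 + n^2) for coprime m > n > 0 of opposite parity, giving
# perimeter 2m(m + n). Stepping through multiples of each primitive perimeter
# covers the non-primitive triples, so a perimeter counted exactly once admits
# exactly one integer-sided right triangle.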
if __name__ == "__main__":
print(F"{solution() = }")
| 340
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure["modeling_data2vec_text"] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure["modeling_data2vec_vision"] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"


def http_user_agent(user_agent=None):
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"; torch/{_torch_version}"
if is_flax_available():
ua += f"; jax/{_jax_version}"
ua += f"; flax/{_flax_version}"
if is_onnx_available():
ua += f"; onnxruntime/{_onnxruntime_version}"
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
return ua
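
# The assembled user agent looks roughly like this (illustrative values only):
#   "diffusers/0.18.0; python/3.10; session_id/4f1d...; torch/2.0.1"
# with "; telemetry/off" appended and the framework tags skipped entirely when
# telemetry is disabled or the Hub is offline.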
def get_full_repo_name(model_id: str, organization=None, token=None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[], ), template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name, dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ), adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None, adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None, adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None, adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None, lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None, ema_power=args.ema_power if hasattr(args, "ema_power") else None, ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None, mixed_precision=args.mixed_precision, )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file, commit_hash=None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
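
# Illustration (hypothetical path): a file resolved to
#   "~/.cache/huggingface/diffusers/models--x/snapshots/abc123def/unet/config.json"
# yields "abc123def" as the commit hash, provided it matches REGEX_COMMIT_HASH.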
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir=None, new_cache_dir=None):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant=None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
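
# Worked example, derived directly from the splice above:
#   _add_variant("diffusion_pytorch_model.bin", "fp16")
#   -> "diffusion_pytorch_model.fp16.bin"
# With variant=None the name passes through unchanged.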
def _get_model_file(pretrained_model_name_or_path, *, weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
            )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
'''this model name. Check the model page at '''
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
f" directory containing a file named {weights_name} or"
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a file named {weights_name}" )
| 155
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
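
# Usage sketch (assumes the transformers agents/tools API and a PIL image in scope):
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(image, "What is on the table?")
# encode() builds ViLT inputs, forward() scores the candidate answers, and
# decode() maps the argmax logit back through the model's id2label table.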
| 297
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 234
| 0
|
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 360
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
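
# Note on the padding trick above (explanatory, not from the source): generated
# audio can be longer than the original padding mask, and the extra frames are
# real output, so the mask is extended with 1 - padding_value before the boolean
# slice strips genuine padding from each batch item.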
| 261
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(self, num_latents=256, d_latents=1_280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2_048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1_920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, num_choices=-1, is_pair=False, framework=None, num_channels=3, image_width=40, image_height=40) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 223
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 313
| 0
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1_024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
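
# Behaviour sketch: consecutive (src, tgt) pairs are greedily concatenated with
# spaces until the tokenized candidate would exceed max_tokens, at which point
# the running pair is flushed to the finished lists and a new pair starts.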
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=1_28)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
| 357
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 218
| 0
|
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ['▁This', '▁is', '▁a', '▁t', 'est']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['monolingual_vocab_file'])
        with open(self.monolingual_vocab_file, 'w', encoding='utf-8') as fp:
            for token in vocab_tokens:
                fp.write(F'''{token} {vocab_tokens[token]}\n''')

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'This is a là test'
        output_text = 'This is a<unk><unk> test'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = 'This is a là test'
        bpe_tokens = '▁This ▁is ▁a ▁l à ▁t est'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 183
|
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1000000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest
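
# The classic small case from the Project Euler 50 statement: below one hundred,
# 41 = 2 + 3 + 5 + 7 + 11 + 13 is the prime with the longest run of consecutive
# prime summands; solution() performs the same search up to `ceiling`.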
if __name__ == "__main__":
print(F'''{solution() = }''')
| 183
| 1
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f'{self.data}'


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('''pop from empty stack''')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('''peek from empty stack''')

        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
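
# Quick usage sketch:
#   stack: LinkedStack[int] = LinkedStack()
#   stack.push(1)
#   stack.push(2)
#   assert stack.peek() == 2 and stack.pop() == 2 and len(stack) == 1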
if __name__ == "__main__":
from doctest import testmod
testmod()
| 353
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Dict=10 , UpperCamelCase__ : Any=[10, 20, 30, 40] , UpperCamelCase__ : Any=[1, 1, 2, 1] , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str="relu" , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Tuple=None , ) -> List[str]:
"""simple docstring"""
snake_case : List[str] = parent
snake_case : Tuple = batch_size
snake_case : int = image_size
snake_case : Any = num_channels
snake_case : Optional[int] = embeddings_size
snake_case : Optional[int] = hidden_sizes
snake_case : str = depths
snake_case : Tuple = is_training
snake_case : List[str] = use_labels
snake_case : List[str] = hidden_act
snake_case : Tuple = num_labels
snake_case : Tuple = scope
snake_case : List[str] = len(UpperCamelCase__ )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : Any = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values ):
        """simple docstring"""
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
    def setUp( self ) -> None:
        """simple docstring"""
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def test_config( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        return
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
        """simple docstring"""
        pass
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_jit_compilation( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_flax
class FlaxRegNetModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''np''' )
        outputs = model(**inputs )
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 83
| 0
|
"""simple docstring"""
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 1000000,
"gigajoule": 1000000000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 3600000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4186800.00,
"electronvolt": 1.602176634e-19,
"britishthermalunit_it": 1055.05585,
"footpound": 1.35_58_18,
}
def lowercase ( from_type , to_type , value )-> float:
'''simple docstring'''
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
            F'''Valid values are: {", ".join(ENERGY_CONVERSION )}'''
        )
        raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
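# Hedged usage sketch for the converter defined above (`lowercase` is the
# function name actually used in this file; argument order is
# (from_type, to_type, value)):
#     lowercase("joule", "kilojoule", 1)      ->  0.001
#     lowercase("kilowatthour", "joule", 1)   ->  3600000.0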
| 40
|
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main() -> Dict:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--src_path""" , type=str , default="""biencoder-nq-dev.json""" , help="""Path to raw DPR training data""" , )
    parser.add_argument(
        """--evaluation_set""" , type=str , help="""where to store parsed evaluation_set file""" , )
    parser.add_argument(
        """--gold_data_path""" , type=str , help="""where to store parsed gold_data_path file""" , )
    args = parser.parse_args()
    with open(args.src_path , """r""" ) as src_file, open(args.evaluation_set , """w""" ) as eval_file, open(
        args.gold_data_path , """w""" ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record["""question"""]
            contexts = [context["""title"""] for context in dpr_record["""positive_ctxs"""]]
            eval_file.write(question + """\n""" )
            gold_file.write("""\t""".join(contexts ) + """\n""" )
if __name__ == "__main__":
main()
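# Hedged usage sketch (the script and output file names below are illustrative
# assumptions, not taken from the source):
#     python parse_dpr_relevance_data.py \
#         --src_path biencoder-nq-dev.json \
#         --evaluation_set eval.questions \
#         --gold_data_path eval.gold
# Each line of the evaluation set holds one question; the corresponding line of
# the gold file holds the tab-separated titles of its positive contexts.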
| 155
| 0
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : int = {"vocab_file": "vocab.json"}
_lowerCamelCase : Optional[Any] = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
_lowerCamelCase : Optional[Any] = {"mgp-str": 27}
class MgpstrTokenizer( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs ):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}
@property
    def vocab_size( self ):
"""simple docstring"""
return len(self.vocab )
    def get_vocab( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        """simple docstring"""
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.decoder.get(index )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
return (vocab_file,)
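# Hedged note: the tokenizer above is character-level, so tokenization amounts
# to splitting the input string into single characters before vocab lookup, e.g.
#     "abc" -> ['a', 'b', 'c'] -> one vocab id per character (unk for unknowns)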
| 366
|
'''simple docstring'''
def apply_table( inp , table ):
    """simple docstring"""
    res = ''
    for i in table:
        res += inp[i - 1]
    return res
def left_shift( data ):
    """simple docstring"""
    return data[1:] + data[0]
def xor( a , b ):
    """simple docstring"""
    res = ''
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox( s , data ):
    """simple docstring"""
    row = int('0b' + data[0] + data[-1] , 2 )
    col = int('0b' + data[1:3] , 2 )
    return bin(s[row][col] )[2:]
def function( expansion , sa , sb , key , message ):
    """simple docstring"""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(sa , temp[:4] )  # noqa: E741
    r = apply_sbox(sb , temp[4:] )
    l = '0' * (2 - len(l )) + l  # noqa: E741
    r = '0' * (2 - len(r )) + r
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right
if __name__ == "__main__":
_lowerCamelCase : str = input("Enter 10 bit key: ")
_lowerCamelCase : Optional[Any] = input("Enter 8 bit message: ")
_lowerCamelCase : Tuple = [6, 3, 7, 4, 8, 5, 10, 9]
_lowerCamelCase : Union[str, Any] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_lowerCamelCase : Union[str, Any] = [2, 4, 3, 1]
_lowerCamelCase : int = [2, 6, 3, 1, 4, 8, 5, 7]
_lowerCamelCase : Tuple = [4, 1, 3, 5, 7, 2, 8, 6]
_lowerCamelCase : Any = [4, 1, 2, 3, 2, 3, 4, 1]
_lowerCamelCase : Tuple = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_lowerCamelCase : Any = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_lowerCamelCase : str = apply_table(key, paa_table)
_lowerCamelCase : str = temp[:5]
_lowerCamelCase : Any = temp[5:]
_lowerCamelCase : Dict = left_shift(left)
_lowerCamelCase : int = left_shift(right)
_lowerCamelCase : Optional[int] = apply_table(left + right, pa_table)
_lowerCamelCase : Optional[int] = left_shift(left)
_lowerCamelCase : Union[str, Any] = left_shift(right)
_lowerCamelCase : Tuple = left_shift(left)
_lowerCamelCase : Optional[int] = left_shift(right)
_lowerCamelCase : Optional[int] = apply_table(left + right, pa_table)
# encryption
_lowerCamelCase : Dict = apply_table(message, IP)
_lowerCamelCase : Optional[int] = function(expansion, sa, sa, keya, temp)
_lowerCamelCase : Any = temp[4:] + temp[:4]
_lowerCamelCase : List[Any] = function(expansion, sa, sa, keya, temp)
_lowerCamelCase : Tuple = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
_lowerCamelCase : List[str] = apply_table(CT, IP)
_lowerCamelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp)
_lowerCamelCase : Tuple = temp[4:] + temp[:4]
_lowerCamelCase : Any = function(expansion, sa, sa, keya, temp)
_lowerCamelCase : Optional[int] = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 249
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_van"""] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
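# Hedged note on the lazy-import pattern above: replacing the module in
# sys.modules with a _LazyModule defers the heavy torch imports until an
# attribute (e.g. VanModel) is first accessed, keeping the top-level
# `import transformers` fast.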
| 132
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def _lowerCamelCase( a , a , a , a , a=True , a="pt" ):
__a = {"add_prefix_space": True} if isinstance(a , a ) and not line.startswith(" " ) else {}
__a = padding_side
return tokenizer(
[line] , max_length=a , padding="max_length" if pad_to_max_length else None , truncation=a , return_tensors=a , add_special_tokens=a , **a , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset( Dataset ):
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + ".source" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + ".target" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self ):
return len(self.src_lens )
    def __getitem__( self , index ):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("\n" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("\n" )
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , "right" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , "right" )
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
    def get_char_lens( data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ):
        input_ids = torch.stack([x["input_ids"] for x in batch] )
        masks = torch.stack([x["attention_mask"] for x in batch] )
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
SCREAMING_SNAKE_CASE__:Tuple = getLogger(__name__)
def flatten_list( summary_ids ):
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info( folder_path ):
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , "git_log.json" ) )
def save_json( content , path , indent=4 , **json_dump_kwargs ):
    with open(path , "w" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path ):
    with open(path ) as f:
        return json.load(f )
def get_git_info():
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
        "hostname": str(socket.gethostname() ),
    }
    return repo_infos
def lmap( f , x ):
    return list(map(f , x ) )
def pickle_save( obj , path ):
    with open(path , "wb" ) as f:
        return pickle.dump(obj , f )
def normalize_answer( s ):
    def remove_articles( text ):
        return re.sub(R"\b(a|an|the)\b" , " " , text )
    def white_space_fix( text ):
        return " ".join(text.split() )
    def remove_punc( text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower( text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
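# Hedged worked example of normalize_answer above
# (lowercase, strip punctuation, drop articles, collapse whitespace):
#     normalize_answer("The  Cat, sat!")  ->  "cat sat"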
def f1_score( prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
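# Hedged worked example of the token-level F1 above:
#     prediction = "the cat sat", ground truth = "a cat sat down"
#     after normalization: ["cat", "sat"] vs ["cat", "sat", "down"], 2 tokens overlap
#     precision = 2/2 = 1.0, recall = 2/3, F1 = 2*1.0*(2/3) / (1.0 + 2/3) = 0.8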
def exact_match_score( prediction , ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns , reference_lns ):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model( model_prefix ):
    return model_prefix.startswith("rag" )
def set_extra_model_params( extra_params , hparams , config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("config doesn't have a `{}` attribute".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 261
| 0
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
A__ : Tuple = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."} )
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class DataTrainingArguments :
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."} )
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} )
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."} )
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."} )
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."} )
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."} )
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics( split , metrics , output_dir ):
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , F"""{split}_results.json""" ) )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , lowerCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(model , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(metric_key_prefix="val" )
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics("val" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
if training_args.do_predict:
logger.info("*** Predict ***" )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="test" )
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"] , 4 )
            handle_metrics("test" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , "test_generations.txt" ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
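# Hedged usage sketch (script, model name, and data dir below are illustrative
# assumptions, not taken from the source):
#     python finetune_trainer.py \
#         --model_name_or_path t5-small --data_dir ./xsum \
#         --output_dir ./output --do_train --do_eval --predict_with_generate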
| 359
|
"""simple docstring"""
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 209
| 0
|
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , unk_token='''<unk>''' )
        tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
        input_text = '''this is a test'''
        output_text = '''this is a test'''
        return input_text, output_text
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
        token = '''<pad>'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<pad>''' )
        self.assertEqual(vocab_keys[1] , '''<unk>''' )
        self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
        self.assertEqual(len(vocab_keys ) , 30001 )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''This is a test'''
lowercase__ = [13, 1, 4398, 25, 21, 1289]
lowercase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = DebertaVaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , keep_accents=UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# fmt: off
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = DebertaVaTokenizer(UpperCamelCase )
lowercase__ = tokenizer.encode('''sequence builders''' )
lowercase__ = tokenizer.encode('''multi-sequence build''' )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase , )
@slow
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
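# Hedged note on the expected tokens in the tests above: the "▁" (U+2581)
# prefix is the SentencePiece convention for a word-initial piece, so
# detokenization is essentially "".join(tokens).replace("▁", " ").strip().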
| 2
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_lowerCAmelCase : Optional[Any] = False
class __magic_name__ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
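# Hedged note: comparing a small 3x3 slice of the generated image against
# stored reference values (with a loose 1e-2 tolerance) is a cheap regression
# check that avoids committing the full 512x512 output to the repository.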
| 218
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit_mae"""] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vit_mae"""] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 336
|
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
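# Hedged note on the helper above: each beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i),
# so the cumulative product of (1 - beta_i) telescopes to alpha_bar(t) itself;
# min(..., max_beta) merely caps the largest betas near t = 1.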
class KDPM2DiscreteScheduler( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__( self , num_train_timesteps = 1000 , beta_start = 0.00085 , beta_end = 0.012 , beta_schedule = "linear" , trained_betas = None , prediction_type = "epsilon" , timestep_spacing = "linspace" , steps_offset = 0 , ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""" )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
    def index_for_timestep( self , timestep , schedule_timesteps=None ):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input( self , sample , timestep , ):
        step_index = self.index_for_timestep(timestep )
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
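    # Hedged note on scale_model_input above: dividing by (sigma**2 + 1) ** 0.5
    # rescales the noisy sample to roughly unit variance before the model call,
    # the standard input preconditioning for Karras-style sigma schedules.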
    def set_timesteps( self , num_inference_steps , device = None , num_train_timesteps = None , ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=float )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(float )
            timesteps -= 1
        else:
            raise ValueError(
                f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        self.log_sigmas = torch.from_numpy(np.log(sigmas ) ).to(device )
        sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
        if str(device ).startswith("""mps""" ):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps ).to(device , dtype=torch.float32 )
        else:
            timesteps = torch.from_numpy(timesteps ).to(device )
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol ).to(device , dtype=timesteps.dtype )
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps] )
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def sigma_to_t( self , sigma ):
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0 , 1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape )
        return t
@property
def state_in_first_order(self):
return self.sample is None
def step(self, model_output, timestep, sample, return_dict=True):
step_index = self.index_for_timestep(timestep)
# advance index counter by 1
timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
sigma = self.sigmas[step_index]
sigma_interpol = self.sigmas_interpol[step_index + 1]
sigma_next = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
sigma = self.sigmas[step_index - 1]
sigma_interpol = self.sigmas_interpol[step_index]
sigma_next = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
gamma = 0
sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
pred_original_sample = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
derivative = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
dt = sigma_interpol - sigma_hat
# store for 2nd order step
self.sample = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
derivative = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
dt = sigma_next - sigma_hat
sample = self.sample
self.sample = None
prev_sample = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=prev_sample)
def add_noise(self, original_samples, noise, timesteps):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
# mps does not support float64
schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32 )
timesteps = timesteps.to(original_samples.device, dtype=torch.float32 )
else:
schedule_timesteps = self.timesteps.to(original_samples.device )
timesteps = timesteps.to(original_samples.device )
step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
sigma = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
sigma = sigma.unsqueeze(-1 )
noisy_samples = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
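
# --- Editor's addition (not part of the original scheduler file) ---
# A minimal standalone sketch of the log-space sigma interpolation performed in
# set_timesteps above: lerping adjacent log-sigmas with weight 0.5 inserts the
# geometric mean between consecutive noise levels. The sigma ramp below is made
# up purely for illustration.
import numpy as np

sigmas_demo = np.array([14.6, 8.0, 3.5, 1.2, 0.4])
log_sigmas_demo = np.log(sigmas_demo)
sigmas_interpol_demo = np.exp(0.5 * (log_sigmas_demo[:-1] + log_sigmas_demo[1:]))
print(sigmas_interpol_demo)  # approx [10.81, 5.29, 2.05, 0.69]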
| 336
| 1
|
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
"""Check Project Euler 43's substring-divisibility property for a digit tuple."""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
tests = [7, 11, 13, 17]
for i, test in enumerate(tests ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def solution(n: int = 10) -> int:
"""Sum all 0-9 pandigital numbers with the substring-divisibility property."""
return sum(
int(''.join(map(str, num ) ) )
for num in permutations(range(n ) )
if is_substring_divisible(num ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
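# Editor's check (added): Project Euler 43 names 1406357289 as one pandigital
# number with the property, so the predicate above must accept its digit tuple.
assert is_substring_divisible(tuple(int(d) for d in "1406357289"))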
| 90
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
config = DistilBertConfig(
vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True, )
return config, input_ids, attention_mask
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def setUp(self):
self.model_tester = FlaxDistilBertModelTester(self )
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained('distilbert-base-uncased' )
outputs = model(np.ones((1, 1) ) )
self.assertIsNotNone(outputs )
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
output = model(input_ids, attention_mask=attention_mask )[0]
expected_shape = (1, 11, 768)
self.assertEqual(output.shape, expected_shape )
expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4 ) )
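
# --- Editor's addition (not part of the original test file) ---
# A self-contained stand-in for the ids_tensor / random_attention_mask helpers
# the tester above relies on (`np` is imported at the top of this file); the
# shapes and vocab size mirror the tester defaults but are otherwise arbitrary.
rng = np.random.default_rng(0)
demo_input_ids = rng.integers(0, 99, size=(13, 7))
demo_attention_mask = (rng.random((13, 7)) < 0.5).astype(np.int64)
print(demo_input_ids.shape, demo_attention_mask.shape)  # (13, 7) (13, 7)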
| 83
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
backbone_config = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
config = DetaConfig(
backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
# set labels
repo_id = "huggingface/label-files"
if "o365" in model_name:
num_labels = 366
filename = "object365-id2label.json"
else:
num_labels = 91
filename = "coco-detection-id2label.json"
config.num_labels = num_labels
idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset" ) ) , "r" ) )
idalabel = {int(k): v for k, v in idalabel.items()}
config.id2label = idalabel
config.label2id = {v: k for k, v in idalabel.items()}
return config
def create_rename_keys(config):
rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
val = dct.pop(old)
dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
in_proj_bias = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
# fmt: on
def read_in_decoder_q_k_v(state_dict, config):
# transformer decoder self-attention layers
hidden_size = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
in_proj_weight = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
in_proj_bias = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
state_dict[F'''model.decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
state_dict[F'''model.decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
state_dict[F'''model.decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
state_dict[F'''model.decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
state_dict[F'''model.decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
state_dict[F'''model.decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
config = get_deta_config(model_name)
# load original state dict
if model_name == "deta-swin-large":
checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
# original state dict
for name, param in state_dict.items():
print(name , param.shape )
# rename keys
rename_keys = create_rename_keys(config)
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_swin_q_k_v(state_dict , config.backbone_config )
read_in_decoder_q_k_v(state_dict , config )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
val = state_dict.pop(key)
state_dict[key.replace("transformer.decoder" , "model.decoder" )] = val
if "input_proj" in key:
val = state_dict.pop(key)
state_dict["model." + key] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
val = state_dict.pop(key)
state_dict[key.replace("transformer" , "model" )] = val
# finally, create HuggingFace model and load state dict
model = DetaForObjectDetection(config)
model.load_state_dict(state_dict)
model.eval()
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
# load image processor
processor = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
img = prepare_img()
encoding = processor(images=img , return_tensors="pt" )
pixel_values = encoding["pixel_values"]
outputs = model(pixel_values.to(device) )
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
expected_logits = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
expected_logits = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(UpperCamelCase_ ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(UpperCamelCase_ ) , atol=1e-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(pytorch_dump_folder_path).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path)
processor.save_pretrained(pytorch_dump_folder_path)
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(F'''jozhang97/{model_name}''' )
processor.push_to_hub(F'''jozhang97/{model_name}''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
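
# --- Editor's addition (not part of the original conversion script) ---
# A toy demonstration of the rename_key pattern used throughout: pop the value
# stored under the old key and reinsert it under the new name. The key pair is
# taken from the stem renames above; the tensor value is a placeholder.
demo_state_dict = {"backbone.0.body.patch_embed.proj.weight": [0.0]}
rename_key(demo_state_dict, "backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight")
print(list(demo_state_dict))  # only the renamed key remains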
| 360
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/pegasus-xsum': 512,
}
logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['''input_ids''', '''attention_mask''']
def __init__(self, vocab_file, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
self.offset = offset
if additional_special_tokens is not None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError(
f'''additional_special_tokens should be of type {type(list )}, but is'''
f''' {type(_SCREAMING_SNAKE_CASE )}''' )
additional_special_tokens_extended = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
]
if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
additional_special_tokens = additional_special_tokens_extended
else:
additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , pad_token=pad_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self.mask_token_sent = mask_token_sent
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(vocab_file)
# add special tokens to encoder dict
self.encoder = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
self.decoder = {v: k for k, v in self.encoder.items()}
@property
def vocab_size(self) -> int:
return len(self.sp_model ) + self.offset
def get_vocab(self) -> Dict[str, int]:
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _tokenize(self, text: str) -> List[str]:
return self.sp_model.encode(text , out_type=str )
def _convert_token_to_id(self, token: str) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
sp_id = self.sp_model.piece_to_id(token )
return sp_id + self.offset
def _convert_id_to_token(self, index: int) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
token = self.sp_model.IdToPiece(index - self.offset )
return token
def convert_tokens_to_string(self, tokens):
current_sub_tokens = []
out_string = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(current_sub_tokens ) + token
current_sub_tokens = []
else:
current_sub_tokens.append(token )
out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def num_special_tokens_to_add(self, pair=False):
return 1
def _special_token_mask(self, seq):
all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(token_ids_0 )
elif token_ids_1 is None:
return self._special_token_mask(token_ids_0 ) + [1]
else:
return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
if token_ids_1 is None:
return token_ids_0 + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_0 + token_ids_1 + [self.eos_token_id]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , "wb" ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
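
# --- Editor's addition (not part of the original tokenizer file) ---
# A sketch of the offset bookkeeping implemented above: ids 0..offset-1 are
# reserved for special tokens, and every SentencePiece piece id is shifted up
# by `offset`. offset=103 matches the signature default; the piece id below is
# made up.
demo_offset = 103
demo_piece_id = 5  # hypothetical sp_model.piece_to_id(...) result
demo_token_id = demo_piece_id + demo_offset
assert demo_token_id - demo_offset == demo_piece_id
print(demo_token_id)  # 108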
| 328
| 0
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
"""Apply the Koch construction step `steps` times to the vector list."""
vectors = initial_vectors
for _ in range(steps):
vectors = iteration_step(vectors)
return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
"""Insert the Koch 'bump' between every pair of consecutive vectors."""
new_vectors = []
for i, start_vector in enumerate(vectors[:-1] ):
end_vector = vectors[i + 1]
new_vectors.append(start_vector)
difference_vector = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
"""Rotate a 2-D vector counterclockwise by the given angle in degrees."""
theta = numpy.radians(angle_in_degrees)
c, s = numpy.cos(theta), numpy.sin(theta)
rotation_matrix = numpy.array(((c, -s), (s, c)) )
return numpy.dot(rotation_matrix, vector )
def plot(vectors: list[numpy.ndarray]) -> None:
"""Plot the snowflake with equal axis scaling."""
axes = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
x_coordinates, y_coordinates = zip(*vectors )
plt.plot(x_coordinates, y_coordinates )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
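# Editor's check (added): rotating the unit x-vector by 60 degrees should give
# (cos 60°, sin 60°), which is VECTOR_2 above.
assert numpy.allclose(rotate(numpy.array([1, 0]), 60), VECTOR_2)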
| 32
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
size = size if size is not None else {'''height''': 18, '''width''': 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
image_processing_class = DPTImageProcessor if is_vision_available() else None
def setUp(self):
self.image_processor_tester = DPTImageProcessingTester(self )
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
self.assertTrue(hasattr(image_processing , '''image_std''' ) )
self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
self.assertTrue(hasattr(image_processing , '''size''' ) )
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def test_call_pil(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def test_call_numpy(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
self.assertIsInstance(image , np.ndarray )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def test_call_pytorch(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
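
# --- Editor's addition (not part of the original test file) ---
# An illustrative stand-in for the prepare_image_inputs helper used above: it
# yields `batch_size` random images whose sides fall between min_resolution and
# max_resolution; the tests then assert the processor resizes them all to
# size {"height": 18, "width": 18}. The helper name and sizes are made up.
def demo_image_inputs(batch_size=7, num_channels=3, min_resolution=30, max_resolution=400):
    rng = np.random.default_rng(0)
    images = []
    for _ in range(batch_size):
        height = int(rng.integers(min_resolution, max_resolution))
        width = int(rng.integers(min_resolution, max_resolution))
        images.append(rng.integers(0, 256, size=(height, width, num_channels), dtype=np.uint8))
    return images

print([image.shape for image in demo_image_inputs(batch_size=2)])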
| 249
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
'''simple docstring'''
model_type = 'wavlm'
def __init__(self , _lowerCamelCase=32 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase="group" , _lowerCamelCase="gelu" , _lowerCamelCase=(512, 512, 512, 512, 512, 512, 512) , _lowerCamelCase=(5, 2, 2, 2, 2, 2, 2) , _lowerCamelCase=(10, 3, 3, 3, 3, 2, 2) , _lowerCamelCase=False , _lowerCamelCase=128 , _lowerCamelCase=16 , _lowerCamelCase=320 , _lowerCamelCase=800 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.05 , _lowerCamelCase=10 , _lowerCamelCase=2 , _lowerCamelCase=0.0 , _lowerCamelCase=10 , _lowerCamelCase=320 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=100 , _lowerCamelCase=256 , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase="mean" , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=256 , _lowerCamelCase=(512, 512, 512, 512, 1500) , _lowerCamelCase=(5, 3, 3, 1, 1) , _lowerCamelCase=(1, 2, 3, 1, 1) , _lowerCamelCase=512 , _lowerCamelCase=80 , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=False , _lowerCamelCase=3 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=None , **_lowerCamelCase , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase )
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : str = feat_extract_norm
UpperCAmelCase__ : Optional[int] = feat_extract_activation
UpperCAmelCase__ : Optional[Any] = list(_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = list(_lowerCamelCase )
UpperCAmelCase__ : List[str] = list(_lowerCamelCase )
UpperCAmelCase__ : str = conv_bias
UpperCAmelCase__ : Tuple = num_buckets
UpperCAmelCase__ : str = max_bucket_distance
UpperCAmelCase__ : Dict = num_conv_pos_embeddings
UpperCAmelCase__ : List[str] = num_conv_pos_embedding_groups
UpperCAmelCase__ : List[Any] = len(self.conv_dim )
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : Optional[int] = hidden_dropout
UpperCAmelCase__ : Union[str, Any] = attention_dropout
UpperCAmelCase__ : Optional[int] = activation_dropout
UpperCAmelCase__ : Optional[int] = feat_proj_dropout
UpperCAmelCase__ : List[str] = final_dropout
UpperCAmelCase__ : Union[str, Any] = layerdrop
UpperCAmelCase__ : Union[str, Any] = layer_norm_eps
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Union[str, Any] = num_ctc_classes
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : Optional[Any] = do_stable_layer_norm
UpperCAmelCase__ : Optional[int] = use_weighted_layer_sum
UpperCAmelCase__ : int = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ : Tuple = apply_spec_augment
UpperCAmelCase__ : str = mask_time_prob
UpperCAmelCase__ : Any = mask_time_length
UpperCAmelCase__ : str = mask_time_min_masks
UpperCAmelCase__ : List[Any] = mask_feature_prob
UpperCAmelCase__ : Optional[int] = mask_feature_length
# parameters for pretraining with codevector quantized representations
UpperCAmelCase__ : int = num_codevectors_per_group
UpperCAmelCase__ : List[Any] = num_codevector_groups
UpperCAmelCase__ : Optional[Any] = contrastive_logits_temperature
UpperCAmelCase__ : int = num_negatives
UpperCAmelCase__ : List[str] = codevector_dim
UpperCAmelCase__ : Tuple = proj_codevector_dim
UpperCAmelCase__ : str = diversity_loss_weight
# ctc loss
UpperCAmelCase__ : List[str] = ctc_loss_reduction
UpperCAmelCase__ : str = ctc_zero_infinity
# adapter
UpperCAmelCase__ : List[Any] = add_adapter
UpperCAmelCase__ : Dict = adapter_kernel_size
UpperCAmelCase__ : Dict = adapter_stride
UpperCAmelCase__ : Optional[Any] = num_adapter_layers
UpperCAmelCase__ : Optional[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase__ : Union[str, Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase__ : Tuple = list(_lowerCamelCase )
UpperCAmelCase__ : Any = list(_lowerCamelCase )
UpperCAmelCase__ : List[str] = list(_lowerCamelCase )
UpperCAmelCase__ : Any = xvector_output_dim
@property
def inputs_to_logits_ratio(self):
return functools.reduce(operator.mul , self.conv_stride , 1 )
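
# --- Editor's addition (not part of the original config file) ---
# The inputs_to_logits_ratio property above multiplies the conv strides
# together, giving the feature extractor's total downsampling factor. With the
# default strides from the signature above (functools and operator are already
# imported at the top of this file):
demo_conv_stride = (5, 2, 2, 2, 2, 2, 2)
print(functools.reduce(operator.mul, demo_conv_stride, 1))  # 320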
| 369
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_layoutlmv3"] = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_layoutlmv3"] = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
_import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
from .configuration_layoutlmv3 import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMv3Config,
LayoutLMv3OnnxConfig,
)
from .processing_layoutlmv3 import LayoutLMv3Processor
from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmv3 import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMv3ForQuestionAnswering,
LayoutLMv3ForSequenceClassification,
LayoutLMv3ForTokenClassification,
LayoutLMv3Model,
LayoutLMv3PreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmv3 import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMv3ForQuestionAnswering,
TFLayoutLMv3ForSequenceClassification,
TFLayoutLMv3ForTokenClassification,
TFLayoutLMv3Model,
TFLayoutLMv3PreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
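
# --- Editor's addition (not part of the original file) ---
# A minimal sketch of the lazy-import pattern the module above builds with
# _LazyModule: attribute access is deferred via a module-level __getattr__
# (PEP 562) until a symbol is first requested. The mapping below uses the
# stdlib `math` module purely as a runnable stand-in.
import importlib

_demo_import_structure = {"math": ["sqrt"]}  # module name -> exported symbols

def __getattr__(name):
    for module_name, symbols in _demo_import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)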
| 166
| 0
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
def __init__( self , _A , _A , _A , _A , _A , _A=0.2 , _A=0.2 ):
"""simple docstring"""
__lowerCAmelCase = bp_numa
__lowerCAmelCase = bp_numa
__lowerCAmelCase = bp_numa
__lowerCAmelCase = conva_get[:2]
__lowerCAmelCase = conva_get[2]
__lowerCAmelCase = size_pa
__lowerCAmelCase = rate_w
__lowerCAmelCase = rate_t
__lowerCAmelCase = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
__lowerCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__lowerCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__lowerCAmelCase = -2 * np.random.rand(self.conva[1] ) + 1
__lowerCAmelCase = -2 * np.random.rand(self.num_bpa ) + 1
__lowerCAmelCase = -2 * np.random.rand(self.num_bpa ) + 1
def save_model(self, save_path):
"""simple docstring"""
__lowerCAmelCase = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(_A , "wb" ) as f:
pickle.dump(_A , _A )
print(f"""Model saved: {save_path}""" )
@classmethod
def __SCREAMING_SNAKE_CASE( cls , _A ):
"""simple docstring"""
with open(_A , "rb" ) as f:
__lowerCAmelCase = pickle.load(_A ) # noqa: S301
__lowerCAmelCase = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
__lowerCAmelCase = model_dic.get("size_pooling1" )
__lowerCAmelCase = model_dic.get("num_bp1" )
__lowerCAmelCase = model_dic.get("num_bp2" )
__lowerCAmelCase = model_dic.get("num_bp3" )
__lowerCAmelCase = model_dic.get("rate_weight" )
__lowerCAmelCase = model_dic.get("rate_thre" )
# create model instance
__lowerCAmelCase = CNN(_A , _A , _A , _A , _A , _A , _A )
# modify model parameter
__lowerCAmelCase = model_dic.get("w_conv1" )
__lowerCAmelCase = model_dic.get("wkj" )
__lowerCAmelCase = model_dic.get("vji" )
__lowerCAmelCase = model_dic.get("thre_conv1" )
__lowerCAmelCase = model_dic.get("thre_bp2" )
__lowerCAmelCase = model_dic.get("thre_bp3" )
return conv_ins
def sig(self, x):
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x ))
def do_round(self, x):
"""Round a value to three decimal places."""
return round(x , 3 )
def convolute(self, data, convs, w_convs, thre_convs, conv_step):
"""simple docstring"""
__lowerCAmelCase = convs[0]
__lowerCAmelCase = convs[1]
__lowerCAmelCase = np.shape(_A )[0]
# get the data slice of original image data, data_focus
__lowerCAmelCase = []
for i_focus in range(0 , size_data - size_conv + 1 , _A ):
for j_focus in range(0 , size_data - size_conv + 1 , _A ):
__lowerCAmelCase = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_A )
# calculate the feature map of every single kernel, and saved as list of matrix
__lowerCAmelCase = []
__lowerCAmelCase = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_A ):
__lowerCAmelCase = []
for i_focus in range(len(_A ) ):
__lowerCAmelCase = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_A ) )
__lowerCAmelCase = np.asmatrix(_A ).reshape(
_A , _A )
data_featuremap.append(_A )
# expanding the data slice to One dimenssion
focus1_list = []
for each_focus in data_focus:
focus1_list.extend(self._expand_mat(each_focus ) )
focus_list = np.asarray(focus1_list )
return focus_list, data_featuremap
def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
"""simple docstring"""
__lowerCAmelCase = len(featuremaps[0] )
__lowerCAmelCase = int(size_map / size_pooling )
__lowerCAmelCase = []
for i_map in range(len(_A ) ):
__lowerCAmelCase = featuremaps[i_map]
__lowerCAmelCase = []
for i_focus in range(0 , _A , _A ):
for j_focus in range(0 , _A , _A ):
__lowerCAmelCase = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_A ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_A ) )
__lowerCAmelCase = np.asmatrix(_A ).reshape(_A , _A )
featuremap_pooled.append(_A )
return featuremap_pooled
def _expand(self, data):
"""simple docstring"""
__lowerCAmelCase = []
for i in range(len(_A ) ):
__lowerCAmelCase = np.shape(data[i] )
__lowerCAmelCase = data[i].reshape(1 , shapes[0] * shapes[1] )
__lowerCAmelCase = data_listed.getA().tolist()[0]
data_expanded.extend(_A )
__lowerCAmelCase = np.asarray(_A )
return data_expanded
def _expand_mat(self, data_mat):
"""simple docstring"""
__lowerCAmelCase = np.asarray(_A )
__lowerCAmelCase = np.shape(_A )
__lowerCAmelCase = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
"""simple docstring"""
__lowerCAmelCase = []
__lowerCAmelCase = 0
for i_map in range(_A ):
__lowerCAmelCase = np.ones((size_map, size_map) )
for i in range(0 , _A , _A ):
for j in range(0 , _A , _A ):
__lowerCAmelCase = pd_pool[
i_pool
]
__lowerCAmelCase = i_pool + 1
__lowerCAmelCase = np.multiply(
_A , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_A )
return pd_all
def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
"""simple docstring"""
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(_A )) )
print((" - - Shape: Teach_Data ", np.shape(_A )) )
__lowerCAmelCase = 0
__lowerCAmelCase = []
__lowerCAmelCase = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
__lowerCAmelCase = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(_A ) ):
# print('------------Learning Image: %d--------------'%p)
__lowerCAmelCase = np.asmatrix(datas_train[p] )
__lowerCAmelCase = np.asarray(datas_teach[p] )
__lowerCAmelCase , __lowerCAmelCase = self.convolute(
_A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowerCAmelCase = self.pooling(_A , self.size_poolinga )
__lowerCAmelCase = np.shape(_A )
__lowerCAmelCase = self._expand(_A )
__lowerCAmelCase = data_bp_input
__lowerCAmelCase = np.dot(_A , self.vji.T ) - self.thre_bpa
__lowerCAmelCase = self.sig(_A )
__lowerCAmelCase = np.dot(_A , self.wkj.T ) - self.thre_bpa
__lowerCAmelCase = self.sig(_A )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
__lowerCAmelCase = np.multiply(
(data_teach - bp_outa) , np.multiply(_A , (1 - bp_outa) ) )
__lowerCAmelCase = np.multiply(
np.dot(_A , self.wkj ) , np.multiply(_A , (1 - bp_outa) ) )
__lowerCAmelCase = np.dot(_A , self.vji )
__lowerCAmelCase = pd_i_all / (self.size_poolinga * self.size_poolinga)
__lowerCAmelCase = pd_conva_pooled.T.getA().tolist()
__lowerCAmelCase = self._calculate_gradient_from_pool(
_A , _A , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
__lowerCAmelCase = self._expand_mat(pd_conva_all[k_conv] )
__lowerCAmelCase = self.rate_weight * np.dot(_A , _A )
__lowerCAmelCase = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
__lowerCAmelCase = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layer
__lowerCAmelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
__lowerCAmelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight
__lowerCAmelCase = self.thre_bpa - pd_k_all * self.rate_thre
__lowerCAmelCase = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error over every single image
__lowerCAmelCase = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
__lowerCAmelCase = rp + 1
__lowerCAmelCase = error_count / patterns
all_mse.append(_A )
def draw_error():
__lowerCAmelCase = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_A , "+-" )
plt.plot(_A , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(_A , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(_A )) )
for p in range(len(_A ) ):
__lowerCAmelCase = np.asmatrix(datas_test[p] )
__lowerCAmelCase , __lowerCAmelCase = self.convolute(
_A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowerCAmelCase = self.pooling(_A , self.size_poolinga )
__lowerCAmelCase = self._expand(_A )
__lowerCAmelCase = data_bp_input
__lowerCAmelCase = bp_outa * self.vji.T - self.thre_bpa
__lowerCAmelCase = self.sig(_A )
__lowerCAmelCase = bp_outa * self.wkj.T - self.thre_bpa
__lowerCAmelCase = self.sig(_A )
produce_out.extend(bp_outa.getA().tolist() )
__lowerCAmelCase = [list(map(self.do_round , _A ) ) for each in produce_out]
return np.asarray(_A )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = np.asmatrix(_A )
__lowerCAmelCase , __lowerCAmelCase = self.convolute(
_A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowerCAmelCase = self.pooling(_A , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
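# A minimal readable sketch of what the obfuscated `convolute`/`pooling`
# methods above compute: a strided 'valid' 2-D convolution followed by
# non-overlapping average pooling (the helper names are illustrative only).
import numpy as np
def conv2d_valid(data, kernel, step=1):
    # slide the kernel over the input with the given stride
    size_data, size_conv = data.shape[0], kernel.shape[0]
    size_map = (size_data - size_conv) // step + 1
    out = np.zeros((size_map, size_map))
    for i in range(size_map):
        for j in range(size_map):
            window = data[i * step : i * step + size_conv, j * step : j * step + size_conv]
            out[i, j] = np.sum(window * kernel)
    return out
def average_pool(feature_map, size_pooling):
    # non-overlapping average pooling, as in the "average_pool" branch above
    size_pooled = feature_map.shape[0] // size_pooling
    out = np.zeros((size_pooled, size_pooled))
    for i in range(size_pooled):
        for j in range(size_pooled):
            block = feature_map[
                i * size_pooling : (i + 1) * size_pooling,
                j * size_pooling : (j + 1) * size_pooling,
            ]
            out[i, j] = np.average(block)
    return out
rng = np.random.default_rng(0)
fmap = conv2d_valid(rng.random((6, 6)), rng.random((3, 3)))  # shape (4, 4)
print(fmap.shape, average_pool(fmap, 2).shape)               # (4, 4) (2, 2)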
| 92
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_a = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
lowerCamelCase__ = getattr(__snake_case ,__snake_case )
if weight_type is not None:
lowerCamelCase__ = getattr(__snake_case ,__snake_case ).shape
else:
lowerCamelCase__ = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
lowerCamelCase__ = value
elif weight_type == "weight_g":
lowerCamelCase__ = value
elif weight_type == "weight_v":
lowerCamelCase__ = value
elif weight_type == "bias":
lowerCamelCase__ = value
else:
lowerCamelCase__ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = []
lowerCamelCase__ = fairseq_model.state_dict()
lowerCamelCase__ = hf_model.feature_extractor
    # if the encoder has a different dimension than the decoder -> use proj_weight
lowerCamelCase__ = None
for name, value in fairseq_dict.items():
lowerCamelCase__ = False
if "conv_layers" in name:
load_conv_layer(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,hf_model.config.feat_extract_norm == '''group''' ,)
lowerCamelCase__ = True
elif name.split('''.''' )[0] == "proj":
lowerCamelCase__ = fairseq_model.proj
lowerCamelCase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowerCamelCase__ = True
if "*" in mapped_key:
lowerCamelCase__ = name.split(__snake_case )[0].split('''.''' )[-2]
lowerCamelCase__ = mapped_key.replace('''*''' ,__snake_case )
if "weight_g" in name:
lowerCamelCase__ = '''weight_g'''
elif "weight_v" in name:
lowerCamelCase__ = '''weight_v'''
elif "bias" in name:
lowerCamelCase__ = '''bias'''
elif "weight" in name:
lowerCamelCase__ = '''weight'''
else:
lowerCamelCase__ = None
set_recursively(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'Unused weights: {unused_weights}' )
return proj_weight
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = full_name.split('''conv_layers.''' )[-1]
lowerCamelCase__ = name.split('''.''' )
lowerCamelCase__ = int(items[0] )
lowerCamelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowerCamelCase__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowerCamelCase__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
)
lowerCamelCase__ = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
lowerCamelCase__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__snake_case )
def lowerCAmelCase__(__snake_case ) -> Any:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape
lowerCamelCase__ = nn.Linear(__snake_case ,__snake_case ,bias=__snake_case )
lowerCamelCase__ = emb.weight.data
return lin_layer
def lowerCAmelCase__(__snake_case ) -> Any:
'''simple docstring'''
with open(__snake_case ,'''r''' ,encoding='''utf-8''' ) as f:
lowerCamelCase__ = f.readlines()
lowerCamelCase__ = [line.split(''' ''' )[0] for line in lines]
lowerCamelCase__ = len(__snake_case )
lowerCamelCase__ = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(__snake_case ,range(4 ,num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = WavaVecaConfig.from_pretrained(__snake_case )
lowerCamelCase__ = SpeechaTextaConfig.from_pretrained(
__snake_case ,vocab_size=__snake_case ,decoder_layers=__snake_case ,do_stable_layer_norm=__snake_case )
lowerCamelCase__ = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=__snake_case ,return_attention_mask=__snake_case ,)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
lowerCamelCase__ = model[0].eval()
# set weights for wav2vec2 encoder
lowerCamelCase__ = WavaVecaModel(__snake_case )
lowerCamelCase__ = recursively_load_weights_wavaveca(model.encoder ,__snake_case )
lowerCamelCase__ = SpeechaTextaForCausalLM(__snake_case )
lowerCamelCase__ , lowerCamelCase__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=__snake_case )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
lowerCamelCase__ = nn.Parameter(model.decoder.embed_out.detach() )
    # the layer norm is initialized to the identity matrix, so leaving it unchanged is fine
logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
lowerCamelCase__ = SpeechEncoderDecoderModel(encoder=__snake_case ,decoder=__snake_case )
lowerCamelCase__ = False
# add projection layer
lowerCamelCase__ = nn.Parameter(projection_layer.weight )
lowerCamelCase__ = nn.Parameter(projection_layer.bias )
lowerCamelCase__ = create_vocab_dict(__snake_case )
with open(os.path.join(__snake_case ,'''vocab.json''' ) ,'''w''' ) as fp:
json.dump(__snake_case ,__snake_case )
lowerCamelCase__ = SpeechaTextaTokenizer(os.path.join(__snake_case ,'''vocab.json''' ) )
tokenizer.save_pretrained(__snake_case )
lowerCamelCase__ = hf_wavavec.config.to_dict()
lowerCamelCase__ = tokenizer.pad_token_id
lowerCamelCase__ = tokenizer.bos_token_id
lowerCamelCase__ = tokenizer.eos_token_id
lowerCamelCase__ = '''speech_to_text_2'''
lowerCamelCase__ = '''wav2vec2'''
lowerCamelCase__ = SpeechEncoderDecoderConfig.from_dict(__snake_case )
hf_wavavec.save_pretrained(__snake_case )
feature_extractor.save_pretrained(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
_a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
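# The mapping logic above boils down to walking a dotted checkpoint key
# with getattr and copying the tensor into the matching parameter; a
# minimal sketch of that pattern (the toy module and the helper name
# `set_by_dotted_key` are illustrative only):
import torch
from torch import nn
def set_by_dotted_key(module, key, value):
    *parents, leaf = key.split(".")
    pointer = module
    for attr in parents:
        pointer = getattr(pointer, attr)
    target = getattr(pointer, leaf)
    assert target.shape == value.shape, f"shape mismatch for {key}"
    target.data = value  # copy the checkpoint weight in place
toy = nn.Sequential()
toy.add_module("proj", nn.Linear(4, 2))
set_by_dotted_key(toy, "proj.bias", torch.zeros(2))
print(toy.proj.bias)  # now holds the "checkpoint" zeros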
| 209
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a__ ( lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = 'microsoft/speecht5_tts'
_SCREAMING_SNAKE_CASE : Optional[Any] = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
_SCREAMING_SNAKE_CASE : Optional[int] = 'text_reader'
_SCREAMING_SNAKE_CASE : Dict = SpeechTaProcessor
_SCREAMING_SNAKE_CASE : Dict = SpeechTaForTextToSpeech
_SCREAMING_SNAKE_CASE : int = SpeechTaHifiGan
_SCREAMING_SNAKE_CASE : List[Any] = ['text']
_SCREAMING_SNAKE_CASE : Tuple = ['audio']
def _lowerCamelCase ( self ):
"""simple docstring"""
if self.post_processor is None:
_lowercase : Optional[Any] = "microsoft/speecht5_hifigan"
super().setup()
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
_lowercase : Any = self.pre_processor(text=_UpperCamelCase , return_tensors="pt" , truncation=_UpperCamelCase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
_lowercase : Optional[Any] = load_dataset("Matthijs/cmu-arctic-xvectors" , split="validation" )
_lowercase : Dict = torch.tensor(embeddings_dataset[7305]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def _lowerCamelCase ( self , _UpperCamelCase ):
"""simple docstring"""
with torch.no_grad():
return self.model.generate_speech(**_UpperCamelCase )
def _lowerCamelCase ( self , _UpperCamelCase ):
"""simple docstring"""
with torch.no_grad():
return self.post_processor(_UpperCamelCase ).cpu().detach()
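# A sketch of the same text-to-speech flow using the SpeechT5 classes
# directly, following standard usage of these checkpoints (assumes network
# access and the `datasets` library; real, un-obfuscated class names):
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
inputs = processor(text="Hello, world.", return_tensors="pt")
xvectors = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(xvectors[7305]["xvector"]).unsqueeze(0)
with torch.no_grad():
    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
print(speech.shape)  # 1-D waveform tensor sampled at 16 kHz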
| 199
|
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")
    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
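    # quick sanity check, added as a sketch: both implementations should
    # agree with Python's own bit counting
    for n in range(256):
        assert get_set_bits_count_using_brian_kernighans_algorithm(n) == bin(n).count("1")
        assert get_set_bits_count_using_modulo_operator(n) == bin(n).count("1")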
| 199
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCamelCase : List[str] = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
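# `_LazyModule` above defers the heavy torch/TF imports until first
# attribute access. The same idea can be sketched with PEP 562's
# module-level __getattr__ (the mapping below is illustrative only):
import importlib
_CLASS_TO_MODULE = {"ViTMAEModel": "modeling_vit_mae", "ViTMAEConfig": "configuration_vit_mae"}
def __getattr__(name):
    if name in _CLASS_TO_MODULE:
        submodule = importlib.import_module(f".{_CLASS_TO_MODULE[name]}", __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")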
| 336
|
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not be greater than number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
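    # usage sketch: the last partition absorbs the remainder when
    # number_of_bytes is not evenly divisible by partitions
    print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']
    print(allocation_num(10, 3))   # ['1-3', '4-6', '7-10']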
| 336
| 1
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
with open(lowercase_ ) as metadata_file:
_UpperCamelCase : Dict = json.load(lowercase_ )
_UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
_UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"]
# Load the entity vocab file
_UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ )
# add an entry for [MASK2]
_UpperCamelCase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
_UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f:
_UpperCamelCase : Tuple = json.load(lowercase_ )
_UpperCamelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
_UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
_UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
_UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCamelCase : Optional[Any] = state_dict[bias_name]
_UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase : List[Any] = state_dict[prefix + matrix_name]
_UpperCamelCase : str = state_dict[prefix + matrix_name]
_UpperCamelCase : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCamelCase : Tuple = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCamelCase : int = state_dict["entity_predictions.bias"]
_UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_UpperCamelCase : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_UpperCamelCase : Union[str, Any] = state_dict[key]
else:
_UpperCamelCase : Dict = state_dict[key]
_UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ )
if set(lowercase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(lowercase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" )
_UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCamelCase : Optional[Any] = (0, 9)
_UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : List[str] = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 33, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 1, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ )
_UpperCamelCase : int = "Tokyo is the capital of <mask>."
_UpperCamelCase : List[Any] = (24, 30)
_UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
_UpperCamelCase : int = encoding["input_ids"][0].tolist()
_UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
_UpperCamelCase : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"]
_UpperCamelCase : Tuple = [json.loads(lowercase_ ) for line in open(lowercase_ )]
_UpperCamelCase : List[str] = {}
for entry in data:
_UpperCamelCase : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCamelCase : Dict = entity_id
break
_UpperCamelCase : Dict = F'''{language}:{entity_name}'''
_UpperCamelCase : str = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
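# The embedding surgery above reduces to concatenating new rows initialized
# from an existing token's row onto the embedding matrix; the core pattern
# with toy sizes (illustrative only):
import torch
word_emb = torch.randn(10, 4)            # (vocab_size, hidden_size)
ent_row = word_emb[3].unsqueeze(0)       # reuse an existing token's row
enta_row = word_emb[3].unsqueeze(0)
word_emb = torch.cat([word_emb, ent_row, enta_row])
print(word_emb.shape)  # torch.Size([12, 4]) -> vocab grown by two tokens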
| 310
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = load_tool("text-question-answering" )
self.tool.setup()
_UpperCamelCase : Union[str, Any] = load_tool("text-question-answering" , remote=__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Dict = self.tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.remote_tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Dict = self.tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : List[Any] = self.remote_tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
| 310
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'altclip_text_model'
def __init__( self , lowercase=250002 , lowercase=1024 , lowercase=24 , lowercase=16 , lowercase=4096 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=514 , lowercase=1 , lowercase=0.02 , lowercase=0.02 , lowercase=1e-05 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=True , lowercase=768 , **lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = initializer_factor
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = project_dim
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'altclip_vision_model'
def __init__( self , lowercase=768 , lowercase=3072 , lowercase=512 , lowercase=12 , lowercase=12 , lowercase=3 , lowercase=224 , lowercase=32 , lowercase="quick_gelu" , lowercase=1e-5 , lowercase=0.0 , lowercase=0.02 , lowercase=1.0 , **lowercase , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowercase )
A__ = hidden_size
A__ = intermediate_size
A__ = projection_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = num_channels
A__ = patch_size
A__ = image_size
A__ = initializer_range
A__ = initializer_factor
A__ = attention_dropout
A__ = layer_norm_eps
A__ = hidden_act
@classmethod
def UpperCamelCase ( cls , lowercase , **lowercase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowercase )
A__ , A__ = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type" ) == "altclip":
A__ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'altclip'
__lowerCamelCase = True
def __init__( self , lowercase=None , lowercase=None , lowercase=768 , lowercase=2.6592 , **lowercase ) -> List[str]:
'''simple docstring'''
A__ = kwargs.pop("text_config_dict" , lowercase )
A__ = kwargs.pop("vision_config_dict" , lowercase )
super().__init__(**lowercase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
A__ = {}
# This is the complete result when using `text_config_dict`.
A__ = AltCLIPTextConfig(**lowercase ).to_dict()
            # Give a warning if a value exists in both `_text_config_dict` and `text_config` but the two differ.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
A__ = (
F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
F'The value `text_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
A__ = (
F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
F'value `text_config["{key}"]` will be overriden.'
)
logger.warning(lowercase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
A__ = {}
# This is the complete result when using `vision_config_dict`.
A__ = AltCLIPVisionConfig(**lowercase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
A__ = {
str(lowercase ): value for key, value in _vision_config_dict["id2label"].items()
}
            # Give a warning if a value exists in both `_vision_config_dict` and `vision_config` but the two differ.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
A__ = (
F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
F'values. The value `vision_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
A__ = (
F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
F'The value `vision_config["{key}"]` will be overriden.'
)
logger.warning(lowercase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
A__ = {}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
if vision_config is None:
A__ = {}
logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
A__ = AltCLIPTextConfig(**lowercase )
A__ = AltCLIPVisionConfig(**lowercase )
A__ = projection_dim
A__ = logit_scale_init_value
A__ = 1.0
@classmethod
def UpperCamelCase ( cls , lowercase , lowercase , **lowercase ) -> Union[str, Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__ )
A__ = self.text_config.to_dict()
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
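# A stripped-down sketch of the composite-config pattern above: a parent
# config owns two sub-configs and serializes them as nested dicts (the
# class names here are illustrative only):
import copy
class SubConfig:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
    def to_dict(self):
        return copy.deepcopy(self.__dict__)
class CompositeConfig:
    model_type = "composite"
    def __init__(self, text_config=None, vision_config=None):
        self.text_config = SubConfig(**(text_config or {}))
        self.vision_config = SubConfig(**(vision_config or {}))
    def to_dict(self):
        output = {"model_type": self.model_type}
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        return output
print(CompositeConfig(text_config={"hidden_size": 8}).to_dict())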
| 68
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under fnc between x_start and x_end."""
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate each small segment of the curve as linear and apply
        # the trapezoid area formula
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        area += abs(fxa_next + fxa) * (xa_next - xa) / 2
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
while i <= 1_0_0_0_0_0:
print(F"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
i *= 1_0
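    # sanity check, added as a sketch: on [0, 5] the integrand is
    # nonnegative, so the abs() in the accumulator is harmless; the exact
    # integral is 5**4 / 4 + 5**3 / 3 = 2375 / 12
    exact = 2375 / 12
    approx = trapezoidal_area(f, 0, 5, 100_000)
    print(f"approx={approx:.4f} exact={exact:.4f} error={abs(approx - exact):.2e}")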
| 328
| 0
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
def __A ( self: str ) -> str:
_A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__A , '''neck_hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__A , '''num_attention_heads''' ) )
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Union[str, Any] , __A: Union[str, Any] , __A: Optional[Any]=13 , __A: List[Any]=32 , __A: Optional[Any]=2 , __A: Optional[Any]=3 , __A: Optional[Any]=6_40 , __A: Union[str, Any]=4 , __A: Tuple="silu" , __A: Tuple=3 , __A: Union[str, Any]=32 , __A: Any=0.1 , __A: Optional[int]=0.1 , __A: Optional[int]=0.1 , __A: List[Any]=0.02 , __A: str=True , __A: Union[str, Any]=True , __A: Any=10 , __A: Optional[Any]=None , ) -> str:
_A = parent
_A = batch_size
_A = image_size
_A = patch_size
_A = num_channels
_A = last_hidden_size
_A = num_attention_heads
_A = hidden_act
_A = conv_kernel_size
_A = output_stride
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = classifier_dropout_prob
_A = use_labels
_A = is_training
_A = num_labels
_A = initializer_range
_A = scope
def __A ( self: Optional[Any] ) -> Any:
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.num_labels )
_A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_A = self.get_config()
return config, pixel_values, labels, pixel_labels
def __A ( self: Optional[int] ) -> Optional[Any]:
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __A ( self: Optional[Any] , __A: Optional[Any] , __A: Optional[int] , __A: int , __A: Any ) -> Optional[Any]:
_A = MobileViTModel(config=__A )
model.to(__A )
model.eval()
_A = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __A ( self: Dict , __A: Tuple , __A: Optional[int] , __A: Union[str, Any] , __A: List[Any] ) -> Any:
_A = self.num_labels
_A = MobileViTForImageClassification(__A )
model.to(__A )
model.eval()
_A = model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self: Dict , __A: Tuple , __A: Tuple , __A: int , __A: int ) -> Optional[Any]:
_A = self.num_labels
_A = MobileViTForSemanticSegmentation(__A )
model.to(__A )
model.eval()
_A = model(__A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_A = model(__A , labels=__A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __A ( self: Union[str, Any] ) -> Union[str, Any]:
_A = self.prepare_config_and_inputs()
_A ,_A ,_A ,_A = config_and_inputs
_A = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
A_ = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def __A ( self: Any ) -> List[str]:
_A = MobileViTModelTester(self )
_A = MobileViTConfigTester(self , config_class=__A , has_text_modality=__A )
def __A ( self: Any ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
def __A ( self: Tuple ) -> str:
pass
@unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
def __A ( self: Any ) -> List[Any]:
pass
@unittest.skip(reason='''MobileViT does not output attentions''' )
def __A ( self: Union[str, Any] ) -> Optional[int]:
pass
def __A ( self: Optional[Any] ) -> List[str]:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__A )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self: Tuple ) -> Union[str, Any]:
pass
def __A ( self: Optional[int] ) -> Union[str, Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __A ( self: List[Any] ) -> Tuple:
def check_hidden_states_output(__A: Union[str, Any] , __A: str , __A: Dict ):
_A = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(__A , __A ) )
_A = outputs.hidden_states
_A = 5
self.assertEqual(len(__A ) , __A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_A = 2
for i in range(len(__A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(__A , __A , __A )
def __A ( self: int ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __A ( self: str ) -> Union[str, Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@slow
def __A ( self: Union[str, Any] ) -> int:
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = MobileViTModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __A ( ) -> int:
'''simple docstring'''
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self: List[str] ) -> Optional[Any]:
return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None
@slow
def __A ( self: List[str] ) -> Optional[Any]:
_A = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(__A )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=__A , return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
_A = model(**__A )
# verify the logits
_A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __A )
_A = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) )
@slow
def __A ( self: Optional[int] ) -> List[str]:
_A = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
_A = model.to(__A )
_A = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
_A = prepare_img()
_A = image_processor(images=__A , return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
_A = model(**__A )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __A )
_A = torch.tensor(
[
[[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]],
[[-10.6_869, -10.3_250, -10.3_471], [-10.4_228, -9.9_868, -9.7_132], [-11.0_405, -11.0_221, -10.7_318]],
[[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]],
] , device=__A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __A , atol=1e-4 ) )
@slow
def __A ( self: List[str] ) -> Tuple:
_A = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
_A = model.to(__A )
_A = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
_A = prepare_img()
_A = image_processor(images=__A , return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
_A = model(**__A )
_A = outputs.logits.detach().cpu()
_A = image_processor.post_process_semantic_segmentation(outputs=__A , target_sizes=[(50, 60)] )
_A = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __A )
_A = image_processor.post_process_semantic_segmentation(outputs=__A )
_A = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __A )
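# The hidden-states test above relies on each of MobileViT's five feature
# maps halving the spatial size; a quick arithmetic sketch with the
# tester's defaults (image_size=32, output_stride=32):
image_size = 32
print([image_size // 2 ** (i + 1) for i in range(5)])  # [16, 8, 4, 2, 1]
# the final divisor is 2**5 == 32, matching the output_stride check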
| 367
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "facebook/bart-large-mnli"
A_ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
A_ = "text_classifier"
A_ = AutoTokenizer
A_ = AutoModelForSequenceClassification
A_ = ["text", ["text"]]
A_ = ["text"]
def __A ( self: int ) -> str:
super().setup()
_A = self.model.config
_A = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
_A = int(__A )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: List[str] ) -> int:
_A = labels
return self.pre_processor(
[text] * len(__A ) , [f"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def __A ( self: str , __A: List[Any] ) -> Union[str, Any]:
_A = outputs.logits
_A = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
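# The tool above ranks candidate labels by NLI entailment scores; the
# standard zero-shot pipeline exposes the same behaviour (a sketch that
# downloads the facebook/bart-large-mnli checkpoint):
from transformers import pipeline
classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier("This tutorial explains gradient descent.", candidate_labels=["education", "politics", "sports"])
print(result["labels"][0])  # most likely label, e.g. "education"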
| 75
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
__A : Tuple = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
UpperCAmelCase__ = SpeechTaTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = True
def _lowercase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = SpeechTaTokenizer(_lowerCAmelCase )
UpperCAmelCase = AddedToken('''<mask>''' , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase )
UpperCAmelCase = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = '''this is a test'''
UpperCAmelCase = '''this is a test'''
return input_text, output_text
def _lowercase ( self , _A , _A=False , _A=2_0 , _A=5 ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.get_input_output_texts(_lowerCAmelCase )
UpperCAmelCase = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
return text, ids
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = '''<pad>'''
UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(_lowerCAmelCase ) , 8_1 )
def _lowercase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 7_9 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = len(_lowerCAmelCase )
self.assertNotEqual(_lowerCAmelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCAmelCase = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
UpperCAmelCase = tokenizer.add_tokens(_lowerCAmelCase )
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = len(_lowerCAmelCase )
self.assertNotEqual(_lowerCAmelCase , 0 )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , len(_lowerCAmelCase ) )
self.assertEqual(_lowerCAmelCase , all_size + len(_lowerCAmelCase ) )
UpperCAmelCase = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_lowerCAmelCase )
self.assertGreaterEqual(len(_lowerCAmelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
UpperCAmelCase = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
UpperCAmelCase = tokenizer.add_special_tokens(_lowerCAmelCase )
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = len(_lowerCAmelCase )
self.assertNotEqual(_lowerCAmelCase , 0 )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , len(_lowerCAmelCase ) )
self.assertEqual(_lowerCAmelCase , all_size_a + len(_lowerCAmelCase ) )
UpperCAmelCase = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_lowerCAmelCase )
self.assertGreaterEqual(len(_lowerCAmelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def _lowercase ( self ):
'''simple docstring'''
pass
def _lowercase ( self ):
'''simple docstring'''
pass
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(_lowerCAmelCase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6] , )
UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCAmelCase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
# fmt: off
self.assertListEqual(_lowerCAmelCase , [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6] )
# fmt: on
UpperCAmelCase = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=sequences , )
| 273
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class _UpperCamelCase ( PretrainedConfig ):
'''simple docstring'''
lowerCAmelCase__ = """mgp-str"""
    def __init__( self : int , image_size=[3_2, 1_2_8] , patch_size=4 , num_channels=3 , max_token_length=2_7 , num_character_labels=3_8 , num_bpe_labels=5_0_2_5_7 , num_wordpiece_labels=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1e-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_aa_attentions=False , initializer_range=0.02 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
| 166
| 0
|
import os
import pytest
from attr import dataclass
_UpperCAmelCase = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment :
    framework : str
    role = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
    hyperparameters = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
    distributed_hyperparameters = {**hyperparameters, '''max_steps''': 1_0_0_0}
@property
    def metric_definitions( self: Tuple ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name( self: List[Any] ) -> str:
        """simple docstring"""
        return f'''{self.framework}-transformers-test'''
@property
    def test_path( self: int ) -> str:
"""simple docstring"""
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
    def image_uri( self: Dict ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def lowerCAmelCase_ ( request ) -> Dict:
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 328
|
import copy
import os
import cv2 as cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch :
    def __init__( self: str ) -> Any:
        """simple docstring"""
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch( self: Any , _SCREAMING_SNAKE_CASE: Dict ) -> str:
        """simple docstring"""
        self.img = cva.imread(_SCREAMING_SNAKE_CASE , 0 )
        self.original_image = copy.deepcopy(self.img )
        # histogram of the input image; x holds the per-bin pixel counts
        x , _ , _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            self.rem = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(self.rem )
        self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg" , self.img )
    def plot_histogram( self: Any ) -> Optional[Any]:
"""simple docstring"""
plt.hist(self.img.ravel() , 256 , [0, 256] )
    def show_image( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 328
| 1
|
def greatest_common_divisor( a : int , b : int ):
    '''simple docstring'''
    return abs(b ) if a == 0 else greatest_common_divisor(b % a , a )
def gcd_by_iterative( x : int , y : int ):
    '''simple docstring'''
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y, x % y
    return abs(x )
def main():
    '''simple docstring'''
    try:
        nums = input('Enter two integers separated by comma (,): ' ).split(',' )
        num_a = int(nums[0] )
        num_b = int(nums[1] )
        print(
            F'''greatest_common_divisor({num_a}, {num_b}) = '''
            F'''{greatest_common_divisor(num_a , num_b )}''' )
        print(F'''By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a , num_b )}''' )
    except (IndexError, UnboundLocalError, ValueError):
        print('Wrong input' )
if __name__ == "__main__":
main()
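    # quick sanity check (not part of the original script):
    # greatest_common_divisor(121, 11) == 11 and gcd_by_iterative(121, 11) == 11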
| 199
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 5_00_03
PYTHON_CODE = 5_00_02
@require_sentencepiece
@require_tokenizers
class A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='base' , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='base' , keep_accents=True )
_lowerCamelCase : Optional[int] =tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCamelCase : str =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowerCamelCase : List[Any] =tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCamelCase : Optional[Any] =tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
        self.assertListEqual(language_tokens , ['__java__', '__python__', '__en_XX__', '<mask>'] )
_lowerCamelCase : Optional[Any] ='java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
_lowerCamelCase : Dict =tokenizer(lowercase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ ) , lowercase_ , )
def lowerCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='multi' , keep_accents=True )
_lowerCamelCase : Any =tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCamelCase : Optional[Any] =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowerCamelCase : Dict =tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCamelCase : Optional[Any] =tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
        self.assertListEqual(
            language_tokens , ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'] )
_lowerCamelCase : int ='java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
_lowerCamelCase : Any =tokenizer(lowercase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ ) , lowercase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
    checkpoint_name = 'uclanlp/plbart-python-en_XX'
    src_text = [
        'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
        'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
    ]
    tgt_text = [
        'Returns the maximum value of a b c.',
        'Sums the values of a b c.',
    ]
    expected_src_tokens = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass( cls : str ) -> Optional[Any]:
        """simple docstring"""
        cls.tokenizer : PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes='base' , src_lang='python' , tgt_lang='en_XX' )
        cls.pad_token_id = 1
        return cls
def lowerCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 5_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 5_0002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 5_0003 )
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_lowerCamelCase : List[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase_ )
def lowerCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.assertIn(lowercase_ , self.tokenizer.all_special_ids )
_lowerCamelCase : Dict =[EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
_lowerCamelCase : Optional[int] =self.tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ )
_lowerCamelCase : int =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertNotIn(self.tokenizer.eos_token , lowercase_ )
def lowerCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_lowerCamelCase : List[Any] =['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20]
self.assertIsInstance(src_text[0] , lowercase_ )
_lowerCamelCase : Tuple =10
_lowerCamelCase : Optional[int] =self.tokenizer(lowercase_ , max_length=lowercase_ , truncation=lowercase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowercase_ )
self.assertEqual(len(lowercase_ ) , lowercase_ )
def lowerCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__'] ) , [5_0004, 5_0001] )
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_lowerCamelCase : List[str] =tempfile.mkdtemp()
_lowerCamelCase : Dict =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase_ )
_lowerCamelCase : Any =PLBartTokenizer.from_pretrained(lowercase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase_ )
@require_torch
def lowerCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_lowerCamelCase : Optional[Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase_ , return_tensors='pt' )
_lowerCamelCase : List[Any] =shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowercase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : int =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_lowerCamelCase : List[Any] =shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
_lowerCamelCase : List[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowercase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Tuple =self.tokenizer(self.src_text , padding=lowercase_ , truncation=lowercase_ , max_length=3 , return_tensors='pt' )
_lowerCamelCase : Dict =self.tokenizer(
text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=10 , return_tensors='pt' )
_lowerCamelCase : List[str] =targets['input_ids']
_lowerCamelCase : Optional[Any] =shift_tokens_right(lowercase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCamelCase ( self : Any ) -> Any:
"""simple docstring"""
_lowerCamelCase : Any =self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java' )
self.assertEqual(
nested_simplify(lowercase_ ) , {
# A, test, EOS, en_XX
'input_ids': [[150, 242, 2, 5_0003]],
'attention_mask': [[1, 1, 1, 1]],
# java
'forced_bos_token_id': 5_0001,
} , )
| 199
| 1
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
A : Any = logging.get_logger(__name__)
A : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
A : Any = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
A : Dict = {
'allenai/led-base-16384': 1_6_3_8_4,
}
class __A( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> str:
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
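        # the post-processor, by contrast, has to be rebuilt by hand from its
        # serialized state whenever `add_prefix_space` / `trim_offsets` differ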
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ) -> str:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ) -> None:
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> Union[str, Any]:
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def _pad( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ) -> dict:
        '''simple docstring'''
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask'''] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
| 33
|
from typing import List
from .keymap import KEYMAP, get_character
def mark( key ) -> List[str]:
    def decorator(func ):
        handle = getattr(func , '''handle_key''' , [] )
        handle += [key]
        setattr(func , '''handle_key''' , handle )
        return func
    return decorator
def mark_multiple( *keys ) -> str:
    def decorator(func ):
        handle = getattr(func , '''handle_key''' , [] )
        handle += keys
        setattr(func , '''handle_key''' , handle )
        return func
    return decorator
class KeyHandler( type ):
    def __new__( cls , name , bases , attrs ) -> Union[str, Any]:
        '''simple docstring'''
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , '''key_handler''' ):
            setattr(new_cls , '''key_handler''' , {} )
        setattr(new_cls , '''handle_input''' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , '''handle_key''' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ) -> List[str]:
        '''simple docstring'''
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register( cls ) -> Union[str, Any]:
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 33
| 1
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
__snake_case = {'''UserAgent''': UserAgent().random}
def extract_user_profile( script ) -> dict:
    """simple docstring"""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser :
    def __init__( self: int,username: str ):
        '''simple docstring'''
        self.url = F'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
    def get_json( self: List[str] ):
        '''simple docstring'''
        html = requests.get(self.url,headers=__snake_case ).text
        scripts = BeautifulSoup(html,'html.parser' ).find_all('script' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
def __repr__( self: List[str] ):
'''simple docstring'''
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self: List[str] ):
'''simple docstring'''
return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username( self: Optional[int] ):
        '''simple docstring'''
        return self.user_data["username"]
    @property
    def fullname( self: Any ):
        '''simple docstring'''
        return self.user_data["full_name"]
    @property
    def biography( self: str ):
        '''simple docstring'''
        return self.user_data["biography"]
    @property
    def email( self: Optional[Any] ):
        '''simple docstring'''
        return self.user_data["business_email"]
    @property
    def website( self: Dict ):
        '''simple docstring'''
        return self.user_data["external_url"]
    @property
    def number_of_followers( self: Any ):
        '''simple docstring'''
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self: str ):
        '''simple docstring'''
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self: Any ):
        '''simple docstring'''
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self: Any ):
        '''simple docstring'''
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self: Tuple ):
        '''simple docstring'''
        return self.user_data["is_verified"]
    @property
    def is_private( self: List[Any] ):
        '''simple docstring'''
        return self.user_data["is_private"]
def _A ( _lowercase = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
__UpperCamelCase = InstagramUser(_lowercase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowercase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('''github''')
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 310
|
import torch
from transformers import AutoModel
class __lowerCamelCase (torch.nn.Module ):
def __init__( self: Union[str, Any],A_: Tuple="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(A_,self ).__init__()
__UpperCamelCase = AutoModel.from_pretrained(A_,return_dict=A_ )
__UpperCamelCase = torch.nn.CosineSimilarity(3,1E-08 )
__UpperCamelCase = torch.nn.Softmax(dim=1 )
def snake_case_ ( self: Tuple,**A_: Union[str, Any] ):
'''simple docstring'''
return self.bert(**A_ ).last_hidden_state
def snake_case_ ( self: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2,keepdim=A_ )
def snake_case_ ( self: List[str],A_: Dict,A_: Union[str, Any],A_: Union[str, Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(A_,A_ ) )
def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = W_supports['sizes'].tolist()
__UpperCamelCase = W_supports['start_token_id'].item()
__UpperCamelCase = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = W_supports['input_ids'] == start_token_id
__UpperCamelCase = W_supports['input_ids'] == end_token_id
for i, size in enumerate(A_ ):
if i == 0:
__UpperCamelCase = 0
else:
__UpperCamelCase = support_sizes[i - 1]
__UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]]
__UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]]
__UpperCamelCase = torch.matmul(q[i],s_start.T ).sum(1 ).softmax(0 )
__UpperCamelCase = torch.matmul(q[i],s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__UpperCamelCase = torch.vstack((p_starts, p_start) )
__UpperCamelCase = torch.vstack((p_ends, p_end) )
else:
__UpperCamelCase = p_start
__UpperCamelCase = p_end
return p_starts, p_ends
| 310
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''
    data : int
    left : TreeNode | None = None
    right : TreeNode | None = None
CoinsDistribResult = namedtuple('''CoinsDistribResult''', '''moves excess''')
def _SCREAMING_SNAKE_CASE ( root ):
    """simple docstring"""
    if root is None:
        return 0
    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError("""The nodes number should be same as the number of coins""" )
    # Main calculation
    # Each node reports (moves needed in its subtree, excess coins it passes up);
    # a node's excess is its own coins minus the one it keeps and what it sends down.
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
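    # worked example (not part of the original module): a root holding 3 coins
    # with two empty children needs 2 moves, one coin passed down to each child:
    #   _SCREAMING_SNAKE_CASE(TreeNode(3, TreeNode(0), TreeNode(0))) == 2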
| 370
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowerCAmelCase_( TestCase ):
'''simple docstring'''
    def setUp( self ) -> Dict:
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname ,"""dpr_tokenizer""" )
        os.makedirs(dpr_tokenizer_path ,exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path ,DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        bart_tokenizer_path = os.path.join(self.tmpdirname ,"""bart_tokenizer""" )
        os.makedirs(bart_tokenizer_path ,exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path ,BART_VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(bart_tokenizer_path ,BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__UpperCAmelCase ) )
    def get_dpr_tokenizer( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) )
    def get_bart_tokenizer( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) )
    def tearDown( self ) -> Any:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
    def test_save_load_pretrained( self ) -> int:
        save_dir = os.path.join(self.tmpdirname ,"""rag_tokenizer""" )
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() )
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() ,generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(save_dir )
        rag_tokenizer.save_pretrained(save_dir )
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir ,config=rag_config )
        self.assertIsInstance(new_rag_tokenizer.question_encoder ,DPRQuestionEncoderTokenizerFast )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() ,rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator ,BartTokenizerFast )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() ,rag_tokenizer.generator.get_vocab() )
@slow
    def test_pretrained_token_nq_tokenizer( self ) -> Union[str, Any]:
        tokenizer = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
@slow
    def test_pretrained_sequence_nq_tokenizer( self ) -> Union[str, Any]:
        tokenizer = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
| 184
| 0
|
"""simple docstring"""
__a = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
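# the platform/pyarrow checks above are import-time guards only; the helper
# modules are deleted so they don't leak into the public `datasets` namespace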
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 66
|
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Optional[int] = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
a_ : List[Any] = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
a_ : Optional[int] = {"""facebook/blenderbot_small-90M""": 5_12}
def get_pairs( word: List[Any] ) -> Tuple:
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class __UpperCamelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase="__start__", lowerCAmelCase="__end__", lowerCAmelCase="__unk__", lowerCAmelCase="__null__", **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase, bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, pad_token=lowerCAmelCase, **lowerCAmelCase )
with open(lowerCAmelCase, encoding='''utf-8''' ) as vocab_handle:
lowerCamelCase_ =json.load(lowerCAmelCase )
lowerCamelCase_ ={v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase, encoding='''utf-8''' ) as merges_handle:
lowerCamelCase_ =merges_handle.read().split('''\n''' )[1:-1]
lowerCamelCase_ =[tuple(merge.split() ) for merge in merges]
lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) )
lowerCamelCase_ ={}
@property
    def vocab_size( self ):
"""simple docstring"""
return len(self.encoder )
    def get_vocab( self ):
"""simple docstring"""
return dict(self.encoder, **self.added_tokens_encoder )
    def bpe( self, token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        # normalise punctuation and whitespace before applying BPE merges
        token = re.sub('''([.,!?()])''', R''' \1''', token )
        token = re.sub('''(\')''', R''' \1 ''', token )
        token = re.sub(R'''\s{2,}''', ''' ''', token )
        if "\n" in token:
            token = token.replace('''\n''', ''' __newln__''' )
        tokens = token.split(''' ''' )
        words = []
        for token in tokens:
            if not len(token ):
                continue
            token = token.lower()
            word = tuple(token )
            word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
            pairs = get_pairs(word )
            if not pairs:
                words.append(token )
                continue
            while True:
                bigram = min(pairs, key=lambda pair : self.bpe_ranks.get(pair, float('''inf''' ) ) )
                if bigram not in self.bpe_ranks:
                    break
                first , second = bigram
                new_word = []
                i = 0
                while i < len(word ):
                    try:
                        j = word.index(first, i )
                        new_word.extend(word[i:j] )
                        i = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                new_word = tuple(new_word )
                word = new_word
                if len(word ) == 1:
                    break
                else:
                    pairs = get_pairs(word )
            word = '''@@ '''.join(word )
            word = word[:-4]
            self.cache[token] = word
            words.append(word )
        return " ".join(words )
    def _tokenize( self, text ):
        """simple docstring"""
        split_tokens = []
        words = re.findall(R'''\S+\n?''', text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
    def _convert_token_to_id( self, token ):
        """simple docstring"""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token ) )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase, self.unk_token )
    def convert_tokens_to_string( self, tokens ):
        """simple docstring"""
        out_string = ''' '''.join(tokens ).replace('''@@ ''', '''''' ).strip()
        return out_string
    def save_vocabulary( self, save_directory, filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file, '''w''', encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
| 75
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : int = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig (PretrainedConfig ):
    '''simple docstring'''
    model_type = '''xlm-roberta'''
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
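# Usage sketch (added for illustration; assumes the classes above behave like
# their Hugging Face counterparts and is not part of the original file):
# config = XLMRobertaConfig()
# onnx_config = XLMRobertaOnnxConfig(config, task="default")
# print(onnx_config.inputs)  # OrderedDict mapping input names -> dynamic axes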
| 141
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 141
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    """Configuration class to store the configuration of an M-CTC-T model."""
    model_type = 'mctct'
    def __init__( self , vocab_size=8065 , hidden_size=1536 , num_hidden_layers=36 , intermediate_size=6144 , num_attention_heads=4 , attention_head_dim=384 , max_position_embeddings=920 , layer_norm_eps=1E-5 , layerdrop=0.3 , hidden_act="relu" , initializer_range=0.0_2 , hidden_dropout_prob=0.3 , attention_probs_dropout_prob=0.3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , conv_glu_dim=1 , conv_dropout=0.3 , num_conv_layers=1 , conv_kernel=(7,) , conv_stride=(3,) , input_feat_per_channel=80 , input_channels=1 , conv_channels=None , ctc_loss_reduction="sum" , ctc_zero_infinity=False , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel )
        self.conv_stride = list(conv_stride )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
F"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, "
F"`config.num_conv_layers = {self.num_conv_layers}`." )
| 328
|
def binary_insertion_sort(collection: list) -> list:
    """Insertion sort that uses binary search to find each element's insert position."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
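# Quick illustrative check (added example, not part of the original module):
# binary search keeps comparisons at O(log i) per element, though shifting
# still makes the overall sort O(n^2).
assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]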
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 328
| 1
|
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Computes the SHA-1 digest of `data` (a bytes object), mirroring hashlib.sha1."""

    def __init__(self, data):
        self.data = data
        # Initial hash state from the SHA-1 specification.
        self.h = [0X67_45_23_01, 0XEF_CD_AB_89, 0X98_BA_DC_FE, 0X10_32_54_76, 0XC3_D2_E1_F0]

    @staticmethod
    def rotate(n, b):
        # 32-bit left rotation.
        return ((n << b) | (n >> (32 - b))) & 0XFF_FF_FF_FF

    def padding(self):
        # Pad to a multiple of 64 bytes, appending the message length in bits.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0X5A_82_79_99
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0X6E_D9_EB_A1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0X8F_1B_BC_DC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0XCA_62_C1_D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0XFF_FF_FF_FF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0XFF_FF_FF_FF,
                self.h[1] + b & 0XFF_FF_FF_FF,
                self.h[2] + c & 0XFF_FF_FF_FF,
                self.h[3] + d & 0XFF_FF_FF_FF,
                self.h[4] + e & 0XFF_FF_FF_FF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324

def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string",)
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())

if __name__ == "__main__":
    main()
    import doctest
    doctest.testmod()
| 58
|
"""simple docstring"""
__snake_case : Any = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__snake_case : Union[str, Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__snake_case : int = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__snake_case : Dict = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__snake_case : Dict = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__snake_case : Any = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__snake_case : Tuple = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__snake_case : str = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 58
| 1
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ''''''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
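# Example invocation (added for illustration; paths are placeholders, and the
# script name is assumed):
# python convert_unispeech_sat_checkpoint.py \
#     --checkpoint_path ./unispeech_sat.pt \
#     --pytorch_dump_folder_path ./unispeech-sat-hf \
#     --not_finetuned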
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__A : Optional[int] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 33
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None):
    if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id, path, repo_type='''dataset''' , revision=revision )
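# Illustrative call (added; the repo id and file name are hypothetical):
# hf_hub_url("user/my-dataset", "data/train-00000.parquet", revision="main")
# -> "https://huggingface.co/datasets/user/my-dataset/resolve/main/data/train-00000.parquet"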
| 33
| 1
|
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class DualTransformeraDModel(nn.Module):
    def __init__( self , num_attention_heads: int = 16 , attention_head_dim: int = 88 , in_channels: Optional[int] = None , num_layers: int = 1 , dropout: float = 0.0 , norm_num_groups: int = 32 , cross_attention_dim: Optional[int] = None , attention_bias: bool = False , sample_size: Optional[int] = None , num_vector_embeds: Optional[int] = None , activation_fn: str = "geglu" , num_embeds_ada_norm: Optional[int] = None , ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict: bool = True , ):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
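# Worked example of the blend above (comments only, added for clarity): with
# mix_ratio = 0.5 the two per-condition residuals are averaged, so
#   output = 0.5 * (enc0 - x) + 0.5 * (enc1 - x) + x
# i.e. the original input is re-added after mixing the deltas, which keeps the
# module close to an identity map when both transformers are near-identity.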
| 49
|
__A : List[Any] = [
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
__A : int = [
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
__A : Any = [
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__A : Dict = [
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
__A : List[str] = [
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
__A : List[str] = [
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
__A : Dict = [
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
__A : str = [
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
]
| 49
| 1
|
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
lowerCAmelCase__ = f'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 104
|
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm for the greatest common divisor of two integers."""
    while a != 0:
        a, b = b % a, a
    return b

def find_mod_inverse(a: int, m: int) -> int:
    """Modular multiplicative inverse of `a` modulo `m` via the extended
    Euclidean algorithm."""
    if greatest_common_divisor(a, m) != 1:
        msg = F"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
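# Quick check (added example): 7 * 15 = 105 = 4 * 26 + 1, so 15 is the inverse
# of 7 modulo 26 -- the classic affine-cipher key pair.
assert find_mod_inverse(7, 26) == 15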
| 184
| 0
|
"""simple docstring"""
__all__ = [
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
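# Illustrative schema built from the re-exported feature types above (a sketch
# added for clarity, not part of the original module):
# features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})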
| 312
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
SPIECE_UNDERLINE = '''▁'''
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
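# Usage sketch (added; the checkpoint name comes from the pretrained map above,
# the token ids are illustrative):
# tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
# tok.build_inputs_with_special_tokens([5, 6])  # -> [cls_id, 5, 6, sep_id]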
| 312
| 1
|
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX algorithm for the minimum vertex cover problem: repeatedly
    pick the vertex with the most uncovered edges."""
    queue = []
    # for each node and its adjacency list, add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so -1*len(v) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 141
|
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 141
| 1
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
| 370
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(f'Downloading image from {url} ...')
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
print(f'Done. Image saved to disk as {file_name}.')
| 75
| 0
|
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
lowercase_ = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
lowercase_ = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
lowercase_ = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 58
|
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: track the best suffix sum and the best sum seen so far."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("""-inf""")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
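# Added examples: the flag decides whether an empty subarray (sum 0) may win
# when every element is negative.
assert max_subarray_sum([-1, -2]) == -1
assert max_subarray_sum([-1, -2], allow_empty_subarrays=True) == 0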
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
| 58
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Any = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 365
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ['''prompt''']
    batch_params = ['''prompt''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp', num_train_timesteps=1024, prediction_type='sample', use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    def test_shap_e(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy')
        pipe = ShapEPipeline.from_pretrained('openai/shap-e')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            'a shark', generator=generator, guidance_scale=1_5.0, num_inference_steps=64, frame_size=64, output_type='np', ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 0
| 0
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''', return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained('''gpt2''')
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''', return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained('''gpt2''')
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, '''wb''') as f:
                http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile('''tokenizer.json'''):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open('''tokenizer.json''', '''wb''') as f:
                http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''', f)
            tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1_000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove('''tokenizer.json''')

    def test_legacy_load_from_url(self):
        _ = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''')
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='''test-tokenizer''')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='''valid_org/test-tokenizer-org''')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='''test-dynamic-tokenizer''')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, '''vocab.txt''')
            with open(vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
                vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub('''test-tokenizer''', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id='''test-tokenizer''')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id='''test-tokenizer''', push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, '''vocab.txt''')
            with open(vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
                vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub('''valid_org/test-tokenizer-org''', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id='''valid_org/test-tokenizer-org''')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id='''valid_org/test-tokenizer-org''', push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, '''vocab.txt''')
            with open(vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
                vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
        # No fast custom tokenizer
        tokenizer.push_to_hub('''test-dynamic-tokenizer''', use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizer''')
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, '''vocab.txt''')
            with open(vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
                vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
        tokenizer.push_to_hub('''test-dynamic-tokenizer''', use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizerFast''')
        tokenizer = AutoTokenizer.from_pretrained(
            F'{USER}/test-dynamic-tokenizer', use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizer''')
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = Trie()
trie.add('''Hello 友達''')
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}})
trie.add('''Hello''')
trie.data
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}})
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''') , ['''[CLS] This is a extra_id_100'''])
trie.add('''[CLS]''')
trie.add('''extra_id_1''')
trie.add('''extra_id_100''')
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''') , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''])
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = Trie()
trie.add('''A''')
self.assertEqual(trie.split('''ABC''') , ['''A''', '''BC'''])
self.assertEqual(trie.split('''BCA''') , ['''BC''', '''A'''])
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = Trie()
trie.add('''TOKEN]''')
trie.add('''[SPECIAL_TOKEN]''')
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''') , ['''This is something ''', '''[SPECIAL_TOKEN]'''])
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = Trie()
trie.add('''A''')
trie.add('''P''')
trie.add('''[SPECIAL_TOKEN]''')
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''') , ['''This is something ''', '''[SPECIAL_TOKEN]'''])
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = Trie()
trie.add('''AB''')
trie.add('''B''')
trie.add('''C''')
self.assertEqual(trie.split('''ABC''') , ['''AB''', '''C'''])
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = Trie()
trie.add('''ABC''')
trie.add('''B''')
trie.add('''CD''')
self.assertEqual(trie.split('''ABCD''') , ['''ABC''', '''D'''])
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = Trie()
__a = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3])
self.assertEqual(__SCREAMING_SNAKE_CASE , ['''AB''', '''C'''])
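The assertions above pin down the greedy, longest-match behaviour of the tokenizers `Trie`. As a reading aid, here is a minimal sketch (not the transformers implementation) of the nested-dict layout the first two tests check, where an empty-string key marks the end of an inserted token:

# Minimal sketch of a nested-dict trie; the "" key is the terminal marker
# that the expected dictionaries in the tests above rely on.
class MiniTrie:
    def __init__(self):
        self.data = {}

    def add(self, word: str):
        if not word:
            return
        ref = self.data
        for char in word:
            ref = ref.setdefault(char, {})
        ref[""] = 1  # end-of-token marker

trie = MiniTrie()
trie.add("Hello 友達")
trie.add("Hello")
assert trie.data["H"]["e"]["l"]["l"]["o"][""] == 1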
| 49
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__snake_case :List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
def __init__( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
requires_backends(self , '''vision''')
requires_backends(self , '''torch''')
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.')
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0],
            pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
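A hedged usage sketch for the pipeline class above; the `mask-generation` task string and the `facebook/sam-vit-base` checkpoint are assumptions to check against your installed version. Note how the keyword arguments are routed by `_sanitize_parameters` to `preprocess`, `_forward`, and `postprocess` respectively:

# Usage sketch (task string and checkpoint name are assumptions).
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-base")
outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    points_per_batch=64,       # consumed by preprocess
    pred_iou_thresh=0.9,       # consumed by _forward
    output_bboxes_mask=True,   # consumed by postprocess
)
print(len(outputs["masks"]), outputs["scores"][:3])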
| 49
| 1
|
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, special_strings
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class FeatureExamplesTests(TempDirTestCase):
    clean_on_exit = False
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
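Every test above follows the same recipe: build an argv list for one feature script and hand it to `run_command` together with the cached `accelerate launch` prefix. A standalone sketch of that recipe, with an illustrative config path and script name:

# Standalone sketch of the launch pattern used by every test above
# (the config path and script name are illustrative, not from this file).
from accelerate.test_utils.testing import run_command

launch_args = ["accelerate", "launch", "--config_file", "default_config.yml"]
testargs = ["examples/by_feature/memory.py"]
run_command(launch_args + testargs)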
| 366
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
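A short usage sketch for the class above; coefficients are passed from the constant term upward:

p = Polynomial(2, [1, 2, 3])   # p(x) = 3x^2 + 2x + 1
print(p)                       # 3x^2 + 2x + 1
print(p.evaluate(2))           # 17
q = p.derivative()             # q(x) = 6x + 2
print(q.integral(constant=1) == Polynomial(2, [1, 2, 3]))  # True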
| 253
| 0
|
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    "Build the HTML for a progress bar."
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
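# Illustrative values for the helpers above:
#   format_time(3725)  -> "1:02:05"
#   format_time(65)    -> "01:05"
#   html_progress_bar(3, 10, "Train", "3/10 00:12 < 00:28")  -> an HTML <progress> snippet
#   text_to_html_table([["Step", "Loss"], [10, 0.123456]])   -> an HTML <table> string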
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
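A hedged sketch of wiring the callback above into a `Trainer`. In a notebook, `Trainer` installs `NotebookProgressCallback` on its own, so explicit registration like this is only needed when the default callbacks were replaced; the model and datasets are assumed to exist:

# Usage sketch (model and datasets assumed to exist).
from transformers import Trainer, TrainingArguments

trainer = Trainer(
    model=model,                  # assumed to exist
    args=TrainingArguments(output_dir="out", evaluation_strategy="epoch"),
    train_dataset=train_dataset,  # assumed to exist
    eval_dataset=eval_dataset,    # assumed to exist
    callbacks=[NotebookProgressCallback()],
)
trainer.train()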
| 312
|
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
    from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
        OnnxStableDiffusionInpaintPipeline,
        OnnxStableDiffusionInpaintPipelineLegacy,
        OnnxStableDiffusionPipeline,
        OnnxStableDiffusionUpscalePipeline,
        StableDiffusionOnnxPipeline,
    )
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
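Every block in this `__init__` repeats one guarded-import pattern: probe an optional backend, and on failure export dummy stand-ins that only raise when actually used. A minimal sketch of that pattern; `MyPipeline` is a made-up name for illustration:

# Minimal sketch of the guarded-import pattern used throughout this file.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    class MyPipeline:  # dummy stand-in that fails loudly at use time
        def __init__(self, *args, **kwargs):
            raise ImportError("MyPipeline requires PyTorch: pip install torch")
else:
    from .pipelines import DiffusionPipeline as MyPipeline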
| 312
| 1
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path
    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")
    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
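An illustrative invocation of the script above (the file and directory names are made up). When `--target_model_path` is omitted, the pruned weights land next to the input directory under a `bertarized_` prefix:

# python bertarize.py \
#     --pruning_method sigmoied_threshold \
#     --threshold 0.1 \
#     --model_name_or_path ./serialization_dir/fine_pruned_model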
| 139
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
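A usage sketch for the processor these tests cover; the checkpoint name is an assumption:

# Usage sketch (checkpoint name assumed).
from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
image = Image.new("RGB", (640, 480))
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # (1, 3, crop_height, crop_width); channels are flipped to BGR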
| 139
| 1
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Any = logging.get_logger(__name__)
A__ : Optional[Any] = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
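A usage sketch for the config above, a monthly setup with two static categorical features (all values illustrative):

config = TimeSeriesTransformerConfig(
    prediction_length=24,
    context_length=48,
    num_time_features=2,
    num_static_categorical_features=2,
    cardinality=[5, 10],          # one entry per categorical feature
    embedding_dimension=[2, 4],
)
# input_size * len(lags_sequence) + embeddings + dynamic/time/static-real features + loc/scale
print(config.feature_size)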
| 207
|
'''simple docstring'''
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 75
| 0
|
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size))
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node logs the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size))
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
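# --- Usage sketch: how the two hooks above plug together for SQuAD-style
# evaluation. The helper `postprocess_qa_predictions`, the metric object and the
# datasets are assumed to be defined elsewhere (e.g. in the `utils_qa.py` that
# usually accompanies this trainer), so this is illustrative only:
#
# def post_processing_function(examples, features, predictions, stage="eval"):
#     # turn raw start/end logits into answer strings keyed by example id
#     answers = postprocess_qa_predictions(examples, features, predictions)
#     formatted = [{"id": k, "prediction_text": v} for k, v in answers.items()]
#     references = [{"id": ex["id"], "answers": ex["answers"]} for ex in examples]
#     return EvalPrediction(predictions=formatted, label_ids=references)
#
# trainer = QuestionAnsweringTrainer(
#     model=model,
#     args=training_args,
#     eval_dataset=tokenized_eval_dataset,
#     eval_examples=raw_eval_examples,
#     post_process_function=post_processing_function,
#     compute_metrics=lambda p: metric.compute(predictions=p.predictions, references=p.label_ids),
# )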
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half: row i has (n - i - 1) leading spaces and (i + 1) stars."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half: the star runs shrink from n down to 1."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond for n > 0."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
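# --- Example: expected console output of pretty_print(3), reproduced as a
# comment so importing this module stays side-effect free:
#
#   *
#  * *
# * * *
# * * *
#  * *
#   *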
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build an UperNetConfig (with a Swin backbone) matching the named mmseg checkpoint."""
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id
    )

    return config
def create_rename_keys(config):
    """Build the (old_name, new_name) pairs mapping mmseg checkpoint keys to HF keys."""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Move dct[old] to dct[new]."""
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
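# --- Sanity check (a sketch, not part of the original script): each reverse_*
# helper must undo its forward counterpart, which can be verified on random
# tensors whose relevant dimension is divisible by 4:
#
# weight = torch.randn(8, 16)
# assert torch.equal(
#     reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(weight)), weight
# )
# bias = torch.randn(16)
# assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(bias)), bias)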
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f'upernet-swin-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def __lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
return 8
@property
def __lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(__UpperCAmelCase )
@property
def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
a = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
a = PriorTransformer(**__UpperCAmelCase )
return model
@property
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
torch.manual_seed(0 )
a = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
a = ShapERenderer(**__UpperCAmelCase )
return model
def __lowerCAmelCase ( self : List[Any] ) ->Any:
"""simple docstring"""
a = self.dummy_prior
a = self.dummy_text_encoder
a = self.dummy_tokenizer
a = self.dummy_renderer
a = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=__UpperCAmelCase , clip_sample=__UpperCAmelCase , clip_sample_range=1.0 , )
a = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str=0 ) ->Optional[int]:
"""simple docstring"""
if str(__UpperCAmelCase ).startswith('''mps''' ):
a = torch.manual_seed(__UpperCAmelCase )
else:
a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
a = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
a = '''cpu'''
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
a = output.images[0]
a = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
a = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = torch_device == '''cpu'''
a = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , )
def __lowerCAmelCase ( self : str ) ->Optional[int]:
"""simple docstring"""
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = 1
a = 2
a = self.get_dummy_inputs(__UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
a = batch_size * [inputs[key]]
a = pipe(**__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np"
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
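# --- Note: the seeding pattern used by these tests generalizes to any diffusers
# pipeline. A sketch assuming `pipe` is an already-loaded pipeline:
#
# generator = torch.Generator(device="cpu").manual_seed(0)
# # torch.Generator(device=...) is not supported on "mps"; the usual fallback
# # there is the global seed: generator = torch.manual_seed(0)
# images = pipe("a shark", generator=generator, frame_size=64, output_type="np").images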
def solution(n: int = 2000000) -> int:
    """Project Euler 10: return the sum of all primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]  # 0 = presumed prime, 1 = composite
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):  # step by i to mark the multiples of i
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'{solution() = }')
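if __name__ == "__main__":
    # Sanity check: the primes below 10 are 2, 3, 5 and 7, so the sieve above
    # must return 17 for n = 10.
    assert solution(10) == 17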
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        # get_activation must return fresh instances: an attribute set on act1
        # should not exist on act2
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
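# --- Example: what test_gelu_10 above checks, outside unittest. A sketch
# assuming the same imports as this file:
#
# x = torch.tensor([-100.0, 0.0, 5.0, 50.0])
# print(get_activation("gelu")(x))     # unbounded above, e.g. the last entry is ~50
# print(get_activation("gelu_10")(x))  # identical below the cap, clipped at 10.0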
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self):
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
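# --- Note: the JIT test above uses the standard JAX pattern of tracing a closure
# over `model` once and reusing the compiled function. A sketch assuming a Flax
# `model` plus the jax/numpy imports at the top of this file:
#
# @jax.jit
# def forward(pixel_values):
#     return model(pixel_values=pixel_values)  # `model` is captured by the closure
#
# out = forward(np.ones((1, 3, 30, 30)))  # first call traces and compiles
# out = forward(np.ones((1, 3, 30, 30)))  # subsequent calls reuse the compilation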
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[str] = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
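# --- Example: the derived hidden_size doubles embed_dim once per downsampling
# step, i.e. embed_dim * 2 ** (len(depths) - 1). A quick illustrative check:
#
# config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
# assert config.hidden_size == 96 * 2 ** 3  # == 768
# assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]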
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings):
        super().__init__()

        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt")
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt")
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Set the lowest probabilities to log(0) = -inf so that, per pixel, the kept
        classes carry a cumulative probability mass bounded by `truncation_rate`."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # map the mask back from sorted order to the original class order
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
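# --- Example: what `truncate` does on a toy distribution of shape
# (batch=1, classes=4, pixels=1). An entry survives while the cumulative
# probability mass *before* it is still below the truncation rate:
#
# log_p = torch.log(torch.tensor([[[0.50], [0.30], [0.15], [0.05]]]))
# truncated = pipe.truncate(log_p, truncation_rate=0.9)
# # 0.50, 0.30 and 0.15 survive (mass before each is 0, 0.5, 0.8 < 0.9);
# # 0.05 is set to -inf (mass before it is 0.95 >= 0.9).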
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
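if __name__ == "__main__":
    # Sanity check: Gregorian Easter always falls between March 22 and April 25,
    # so every computed date can be bounds-checked.
    for check_year in range(1900, 2100):
        assert datetime(check_year, 3, 22) <= gauss_easter(check_year) <= datetime(check_year, 4, 25)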
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: int = 0.9, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: Optional[float] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: int = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
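# --- Example: crop_pct implements the timm-style "resize then center-crop"
# recipe; with size={"shortest_edge": 224} and crop_pct=0.9 the short side is
# first resized to int(224 / 0.9) = 248, then center-cropped to 224x224.
# `my_pil_image` below is hypothetical:
#
# processor = PoolFormerImageProcessor(size={"shortest_edge": 224}, crop_pct=0.9)
# batch = processor(images=my_pil_image, return_tensors="np")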
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
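if __name__ == "__main__":
    # Examples: pass exactly one quantity as zero and the function solves for it
    # from the relation stress = tangential_force / area.
    assert shear_stress(stress=25, tangential_force=100, area=0) == ("area", 4.0)
    assert shear_stress(stress=0, tangential_force=1600, area=200) == ("stress", 8.0)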
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the transmitted intensity I = I0 * cos^2(angle), per Malus's law."""
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative')
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees')
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
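if __name__ == "__main__":
    # Example: at a 60 degree angle, cos^2 transmits exactly one quarter of the
    # incident intensity.
    assert abs(malus_law(100.0, 60.0) - 25.0) < 1e-9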
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger('transformers-cli/serving')
def serve_command_factory(args: Namespace):
    """Factory function used to instantiate a serving server from provided command line arguments."""
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information"""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model"""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            'serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.')
        serve_parser.add_argument(
            '--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on')
        serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.')
        serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.')
        serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers')
        serve_parser.add_argument('--model', type=str, help='Model\'s name or path to stored model.')
        serve_parser.add_argument('--config', type=str, help='Model\'s config name or path to stored model.')
        serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.')
        serve_parser.add_argument(
            '--device', type=int, default=-1, help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)')
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__( self : List[str] , pipeline : Pipeline , host : str , port : int , workers : int ) -> str:
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'Using serve command requires FastAPI and uvicorn. '
'Please install transformers with [serving]: pip install "transformers[serving]".'
'Or install FastAPI and uvicorn separately.' )
else:
logger.info(f"""Serving model over {host}:{port}""" )
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '/' , self.model_info , response_model=ServeModelInfoResult , response_class=JSONResponse , methods=['GET'] , ),
                    APIRoute(
                        '/tokenize' , self.tokenize , response_model=ServeTokenizeResult , response_class=JSONResponse , methods=['POST'] , ),
                    APIRoute(
                        '/detokenize' , self.detokenize , response_model=ServeDeTokenizeResult , response_class=JSONResponse , methods=['POST'] , ),
                    APIRoute(
                        '/forward' , self.forward , response_model=ServeForwardResult , response_class=JSONResponse , methods=['POST'] , ),
                ] , timeout=6_0_0 , )
    def run ( self : Tuple ) -> str:
        run(self._app , host=self.host , port=self.port , workers=self.workers )
    def model_info ( self : Any ) -> List[str]:
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
    def tokenize ( self : int , text_input : str = Body(None , embed=True ) , return_ids : bool = Body(False , embed=True ) ) -> Dict:
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input )
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt )
                return ServeTokenizeResult(tokens=tokens_txt , tokens_ids=tokens_ids )
            else:
                return ServeTokenizeResult(tokens=tokens_txt )
        except Exception as e:
            raise HTTPException(status_code=5_0_0 , detail={'model': '', 'error': str(e )} )
    def detokenize ( self : int , tokens_ids : List[int] = Body(None , embed=True ) , skip_special_tokens : bool = Body(False , embed=True ) , cleanup_tokenization_spaces : bool = Body(True , embed=True ) , ) -> Union[str, Any]:
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids , skip_special_tokens , cleanup_tokenization_spaces )
            return ServeDeTokenizeResult(model='' , text=decoded_str )
        except Exception as e:
            raise HTTPException(status_code=5_0_0 , detail={'model': '', 'error': str(e )} )
    async def forward ( self : List[Any] , inputs : Union[str, Any]=Body(None , embed=True ) ) -> int:
        # Check we don't have empty string
        if len(inputs ) == 0:
            return ServeForwardResult(output=[] , attention=[] )
        try:
            # Forward through the model
            output = self._pipeline(inputs )
            return ServeForwardResult(output=output )
        except Exception as e:
            raise HTTPException(5_0_0 , {'error': str(e )} )
| 207
| 0
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee( BertEncoder ):
    def adaptive_forward( self , hidden_states , current_layer , attention_mask=None , head_mask=None ):
        '''simple docstring'''
        layer_outputs = self.layer[current_layer](hidden_states , attention_mask , head_mask[current_layer] )
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , BERT_START_DOCSTRING , )
class BertModelWithPabee( BertModel ):
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.encoder = BertEncoderWithPabee(config )
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold( self , threshold ):
        '''simple docstring'''
        self.regression_threshold = threshold
    def set_patience( self , patience ):
        '''simple docstring'''
        self.patience = patience
    def reset_stats( self ):
        '''simple docstring'''
        self.inference_instances_num = 0
        self.inference_layers_num = 0
    def log_stats( self ):
        '''simple docstring'''
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
            F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
        )
        print(message )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , output_dropout=None , output_layers=None , regression=False , ):
        '''simple docstring'''
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape , device=device )
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape , dtype=torch.long , device=device )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask , input_shape , device )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size , encoder_sequence_length , _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape , device=device )
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask )
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask , self.config.num_hidden_layers )
        embedding_output = self.embeddings(
            input_ids=input_ids , position_ids=position_ids , token_type_ids=token_type_ids , inputs_embeds=inputs_embeds )
        encoder_outputs = embedding_output
        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers ):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs , current_layer=i , attention_mask=extended_attention_mask , head_mask=head_mask )
                pooled_output = self.pooler(encoder_outputs )
                logits = output_layers[i](output_dropout(pooled_output ) )
                res.append(logits )
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output , attention_mask=extended_attention_mask , head_mask=head_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_extended_attention_mask , )
            pooled_output = self.pooler(encoder_outputs[0] )
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output )]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers ):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs , current_layer=i , attention_mask=extended_attention_mask , head_mask=head_mask )
                pooled_output = self.pooler(encoder_outputs )
                logits = output_layers[i](pooled_output )
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels ) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1 )
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1 )
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels ) ):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n    the pooled output) e.g. for GLUE tasks. ' , BERT_START_DOCSTRING , )
class BertForSequenceClassificationWithPabee( BertPreTrainedModel ):
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , ):
        '''simple docstring'''
        logits = self.bert(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits ):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
| 52
|
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def partition ( number_to_partition ) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret : set[int] = set()
    prime : int
    sub : int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime ):
            ret.add(sub * prime )
    return ret
def solution ( number_unique_partitions = 5000 ) -> int | None:
    for number_to_partition in range(1 , number_unique_partitions ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
            return number_to_partition
    return None
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
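    # A minimal usage sketch (not part of the original snippet): partition(10)
    # returns the set of distinct products of primes over all prime partitions of 10.
    print(sorted(partition(10)))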
| 52
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 10_24,
'moussaKam/barthez': 10_24,
'moussaKam/barthez-orangesum-title': 10_24,
}
SPIECE_UNDERLINE = '▁'
class _A ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = BarthezTokenizer
    def __init__( self : int , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ) -> Tuple:
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def __a ( self : Union[str, Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def __a ( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def __a ( self : str , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 116
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, 'src', 'diffusers')
class _A ( unittest.TestCase ):
def __a ( self : Any ) -> str:
"""simple docstring"""
        simple_backend = find_backend(''' if not is_torch_available():''' )
        self.assertEqual(simple_backend , '''torch''' )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
        self.assertEqual(double_backend , '''torch_and_transformers''' )
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
        self.assertEqual(triple_backend , '''torch_and_transformers_and_onnx''' )
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''' , objects )
        self.assertIn('''torch_and_transformers''' , objects )
        self.assertIn('''flax_and_transformers''' , objects )
        self.assertIn('''torch_and_transformers_and_onnx''' , objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''UNet2DModel''' , objects['''torch'''] )
        self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
        self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
        self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
        self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
        self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def __a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
        dummy_constant = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
        self.assertEqual(dummy_constant , '''\nCONSTANT = None\n''' )
        dummy_function = create_dummy_object('''function''' , '''\'torch\'''' )
        self.assertEqual(
            dummy_function , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
        dummy_class = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
        self.assertEqual(dummy_class , expected_dummy_class )
def __a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
        self.assertEqual(dummy_files['''torch'''] , expected_dummy_pytorch_file )
| 116
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neox_japanese'''] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 104
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35
| 0
|
'''simple docstring'''
from math import factorial
class Dual :
"""simple docstring"""
    def __init__( self , real , rank ):
        '''simple docstring'''
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__( self ):
        '''simple docstring'''
        return (
            F'{self.real}+'
            F'{"+".join(str(dual )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
        )
    def a ( self ):
        '''simple docstring'''
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )
    def __add__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Dual ):
            return Dual(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return Dual(self.real + other.real , new_duals )
    __radd__ = __add__
    def __sub__( self , other ):
        '''simple docstring'''
        return self + other * -1
    def __mul__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )
    __rmul__ = __mul__
    def __truediv__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , new_duals )
        raise ValueError
    def __floordiv__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , new_duals )
        raise ValueError
    def __pow__( self , n ):
        '''simple docstring'''
        if n < 0 or isinstance(n , float ):
            raise ValueError('power must be a positive integer' )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x
def differentiate (func , position , order ):
    """simple docstring"""
    if not callable(func ):
        raise ValueError('differentiate() requires a function as input for func' )
    if not isinstance(position , (float, int) ):
        raise ValueError('differentiate() requires a float as input for position' )
    if not isinstance(order , int ):
        raise ValueError('differentiate() requires an int as input for order' )
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
import doctest
doctest.testmod()
def f (y ):
    """simple docstring"""
    return y**2 * y**4
print(differentiate(f, 9, 2))
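# A minimal sanity check (not part of the original snippet): f(y) = y**6, so
# the second derivative at 9 is 30 * 9**4 = 196830, matching the call above.
assert differentiate(f, 9, 2) == 30 * 9**4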
| 358
|
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 25
| 0
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu ( vector : list[float] ) -> np.ndarray:
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 304
|
'''simple docstring'''
import random
class Onepad :
    @staticmethod
    def encrypt ( text : str ) -> tuple[list[int], list[int]]:
        plain = [ord(i ) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1 , 3_00 )
            c = (i + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt ( cipher : list[int] , key : list[int] ) -> str:
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
    c , k = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
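    # A minimal round-trip check (not part of the original snippet): decrypting
    # the freshly encrypted message recovers the plaintext exactly.
    assert Onepad().decrypt(c, k) == 'Hello'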
| 304
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''longformer'''
    def __init__( self , attention_window = 512 , sep_token_id = 2 , pad_token_id = 1 , bos_token_id = 0 , eos_token_id = 2 , vocab_size = 30522 , hidden_size = 768 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 3072 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 512 , type_vocab_size = 2 , initializer_range = 0.0_2 , layer_norm_eps = 1e-1_2 , onnx_export = False , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    def __init__( self , config , task = "default" , patching_specs = None ):
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
    @property
    def inputs ( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''global_attention_mask''', dynamic_axis),
            ] )
    @property
    def outputs ( self ):
        outputs = super().outputs
        if self.task == "default":
            outputs['''pooler_output'''] = {0: '''batch'''}
        return outputs
    @property
    def atol_for_validation ( self ):
        return 1e-4
    @property
    def default_onnx_opset ( self ):
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14 )
    def generate_dummy_inputs ( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs['''global_attention_mask'''] = torch.zeros_like(inputs['''input_ids'''] )
        # make every second token global
        inputs['''global_attention_mask'''][:, ::2] = 1
        return inputs
| 353
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool( PipelineTool ):
    '''simple docstring'''
    description = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    default_checkpoint = '''CIDAS/clipseg-rd64-refined'''
    name = '''image_segmenter'''
    model_class = CLIPSegForImageSegmentation
    inputs = ['''image''', '''text''']
    outputs = ['''image''']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''vision'''] )
        super().__init__(*args , **kwargs )
    def encode ( self , image , label ):
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='''pt''' )
    def forward ( self , inputs ):
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode ( self , outputs ):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
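# A hypothetical usage sketch (not part of the original snippet); the call
# signature is an assumption based on the declared `inputs` above:
# tool = ImageSegmentationTool()
# mask = tool(image=my_pil_image, label='cat')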
| 188
| 0
|
'''simple docstring'''
def a ( number , iterations ) -> str:
    '''simple docstring'''
    if not isinstance(iterations , int ):
        raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            '''starting number must be
an integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
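    # A minimal usage sketch (not part of the original snippet): play FizzBuzz
    # from 1 through 15.
    print(a(1, 15))  # -> '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '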
| 97
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _UpperCAmelCase ( Trainer ):
"""simple docstring"""
    def __init__( self : Dict, *args : Union[str, Any], eval_examples : Union[str, Any]=None, post_process_function : Any=None, **kwargs : str ):
        '''simple docstring'''
        super().__init__(*args, **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate ( self : int, eval_dataset : str=None, eval_examples : Optional[Any]=None, ignore_keys : Union[str, Any]=None, metric_key_prefix : str = "eval" ):
        '''simple docstring'''
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader, description='''Evaluation''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F"""{metric_key_prefix}_""" ):
                    metrics[F"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics )
        return metrics
    def predict ( self : List[Any], predict_dataset : Any, predict_examples : Dict, ignore_keys : int=None, metric_key_prefix : str = "test" ):
        '''simple docstring'''
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader, description='''Prediction''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, '''predict''' )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F"""{metric_key_prefix}_""" ):
                metrics[F"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics )
| 207
| 0
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
lowercase : int = "▁"
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : int = PRETRAINED_VOCAB_FILES_MAP
lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ) -> None:
        '''simple docstring'''
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size ( self ) -> List[str]:
        '''simple docstring'''
        return len(self.sp_model )
    def get_vocab ( self ) -> Optional[int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> int:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> Optional[Any]:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text ( self , inputs ) -> List[Any]:
        '''simple docstring'''
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize ( self , text ) -> List[str]:
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id ( self , token ) -> int:
        '''simple docstring'''
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token ( self , index ) -> Any:
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string ( self , tokens ) -> Dict:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 171
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = 'canine'
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_63_84 , type_vocab_size=16 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0Xe000 , eos_token_id=0Xe001 , downsampling_rate=4 , upsampling_kernel_size=4 , num_hash_functions=8 , num_hash_buckets=1_63_84 , local_transformer_stride=1_28 , **kwargs , ) -> Tuple:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 171
| 1
|
def calc_profit ( profit , weight , max_weight ) -> int:
    """simple docstring"""
    if len(profit ) != len(weight ):
        raise ValueError("""The length of profit and weight must be same.""" )
    if max_weight <= 0:
        raise ValueError("""max_weight must be greater than zero.""" )
    if any(p < 0 for p in profit ):
        raise ValueError("""Profit can not be negative.""" )
    if any(w < 0 for w in weight ):
        raise ValueError("""Weight can not be negative.""" )
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit , weight )]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight )
    # declaring useful variables
    length = len(sorted_profit_by_weight )
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight )
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
    profit = [int(x) for x in input("""Input profits separated by spaces: """).split()]
    weight = [int(x) for x in input("""Input weights separated by spaces: """).split()]
    max_weight = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
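    # A minimal non-interactive example (not part of the original snippet):
    # print(calc_profit([1, 2, 3], [3, 4, 5], 15))  # -> 6.0, since all items fit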
| 116
|
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
'''simple docstring'''
__lowerCamelCase : List[str] = ["torch", "transformers", "onnx"]
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(self, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
'''simple docstring'''
__lowerCamelCase : Dict = ["torch", "transformers", "onnx"]
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(self, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
'''simple docstring'''
__lowerCamelCase : Any = ["torch", "transformers", "onnx"]
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(self, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ["torch", "transformers", "onnx"]
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(self, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ["torch", "transformers", "onnx"]
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(self, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ["torch", "transformers", "onnx"]
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(self, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
| 116
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config (yolos_name ) -> YolosConfig:
    """simple docstring"""
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 1_92
        config.intermediate_size = 7_68
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [8_00, 13_33]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 3_30
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 13_20
    elif "yolos_s" in yolos_name:
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [8_00, 13_44]
    config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
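# Worked example of the slicing above, assuming hidden_size = 384: the fused qkv weight has
# shape (3 * 384, 384) = (1152, 384); rows 0:384 become the query projection, rows 384:768
# the key projection, and rows 768:1152 (i.e. the last 384) the value projection.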
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict, model) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
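# Illustrative example of the qkv branch above: for the timm key "blocks.3.attn.qkv.weight",
# key.split(".")[2] == "3", so layer_num = 3 and dim is that layer's all_head_size; the fused
# (3 * dim, dim) weight is then split into query/key/value entries for encoder layer 3.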
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak model's weights to our YOLOS structure."""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
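# Hypothetical invocation (the script file name and paths are placeholders, not from the original):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small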
| 318
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Cleans the model documentation table of content by removing duplicates and sorting models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
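# Illustrative behaviour of clean_model_doc_toc (hypothetical toc entries):
# input:  [{"local": "model_doc/bert", "title": "BERT"},
#          {"local": "model_doc/bert", "title": "BERT"},
#          {"local": "model_doc/albert", "title": "ALBERT"}]
# output: [{"local": "model_doc/albert", "title": "ALBERT"},
#          {"local": "model_doc/bert", "title": "BERT"}]
# The duplicate "model_doc/bert" entry is collapsed and the result is sorted by title.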
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
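# Hypothetical invocations (the script file name is a placeholder):
#   python check_doc_toc.py                      # fails if the model toc is not properly sorted
#   python check_doc_toc.py --fix_and_overwrite  # rewrites docs/source/en/_toctree.yml in place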
| 318
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor, as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
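# Illustrative usage (values are random in [0, scale), so this is only a shape sketch):
# floats_list((2, 3)) -> [[0.41..., 0.07..., 0.83...], [0.52..., 0.19..., 0.66...]]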
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
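# Worked example of the length schedule in prepare_inputs_for_common above, with the defaults
# batch_size=7, min_seq_length=400, max_seq_length=2000: seq_length_diff = (2000 - 400) // 6
# = 266, so the generated inputs have lengths 400, 666, 932, 1198, 1464, 1730 and 1996.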
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 52
|
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        # prepare() wraps the optimizer; the wrapped object must survive a pickle round-trip
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 25
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
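    # Note on get_dummy_inputs above (per the SAG pipeline's semantics): guidance_scale=1.0
    # effectively disables classifier-free guidance, while sag_scale=1.0 keeps self-attention
    # guidance active, so the fast tests exercise SAG on its own.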
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_sag_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 350
|
"""simple docstring"""
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
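# Worked example: 28 is perfect because its proper divisors 1, 2, 4, 7 and 14 sum to 28.
# Checking candidates only up to number // 2 is safe: no proper divisor can be larger than that.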
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
print(f'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
| 263
| 0
|