| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
class Things:
    """A named item with a value and a weight."""

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
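A minimal usage sketch for the greedy selector above: sort by a key function, then take items while the running weight stays under the cap. The menu names, values, and weights below are made-up illustration data, not part of the original file.

```python
# Hypothetical demo: greedily pick the highest-value items under a cost cap.
food = ["Burger", "Pizza", "Coke", "Apple"]
value = [80, 100, 60, 40]
weight = [40, 60, 40, 70]

menu = build_menu(food, value, weight)
chosen, total_value = greedy(menu, 100, Things.get_value)
print(chosen, total_value)  # takes Pizza (60) then Burger (40): total value 180
```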
| 9
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group"):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
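A minimal smoke-test sketch for the VQ autoencoder above, assuming the diffusers-style default config; the weights are untrained and the tensor shape is illustrative only.

```python
import torch

# Sketch: round-trip a random "image" through the VQ autoencoder defined above.
model = VQModel()
sample = torch.randn(1, 3, 32, 32)
latents = model.encode(sample).latents        # same spatial size with the defaults
reconstruction = model.decode(latents).sample
print(reconstruction.shape)
```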
| 135
| 0
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int=1_3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=9_9 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : int=3_7 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Dict=2_0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Dict=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : Any=4 , ) -> List[str]:
a_ : Tuple = parent
a_ : Dict = batch_size
a_ : Tuple = seq_length
a_ : Optional[Any] = is_training
a_ : Tuple = use_labels
a_ : Union[str, Any] = vocab_size
a_ : List[str] = hidden_size
a_ : Union[str, Any] = num_hidden_layers
a_ : Union[str, Any] = num_attention_heads
a_ : Optional[Any] = intermediate_size
a_ : Any = hidden_dropout_prob
a_ : Optional[Any] = attention_probs_dropout_prob
a_ : Dict = max_position_embeddings
a_ : int = eos_token_id
a_ : Any = pad_token_id
a_ : Optional[int] = bos_token_id
a_ : Any = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
a_ : str = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
a_ : Optional[int] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a_ : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a_ : Optional[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
a_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
a_ : Union[str, Any] = prepare_led_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : str = tf.concat(
[tf.zeros_like(SCREAMING_SNAKE_CASE__ )[:, :-1], tf.ones_like(SCREAMING_SNAKE_CASE__ )[:, -1:]] , axis=-1 , )
a_ : Union[str, Any] = global_attention_mask
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
a_ : Dict = TFLEDModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder()
a_ : Union[str, Any] = inputs_dict['input_ids']
a_ : Tuple = input_ids[:1, :]
a_ : Union[str, Any] = inputs_dict['attention_mask'][:1, :]
a_ : Tuple = 1
# first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
# append to next input_ids and
a_ : Dict = tf.concat([input_ids, next_tokens] , axis=-1 )
a_ : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
a_ : Any = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )[0]
a_ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
a_ : Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
a_ : Tuple = output_from_no_past[:, -3:, random_slice_idx]
a_ : str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1E-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , unittest.TestCase ):
snake_case__ : str = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
snake_case__ : int = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
snake_case__ : str = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
snake_case__ : str = True
snake_case__ : List[str] = False
snake_case__ : Any = False
snake_case__ : List[Any] = False
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
a_ : List[str] = TFLEDModelTester(self )
a_ : List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
a_ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Tuple = tf.zeros_like(inputs_dict['attention_mask'] )
a_ : Optional[int] = 2
a_ : int = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
a_ : Dict = True
a_ : Union[str, Any] = self.model_tester.seq_length
a_ : str = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(SCREAMING_SNAKE_CASE__ : Any ):
a_ : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(SCREAMING_SNAKE_CASE__ : Optional[Any] ):
a_ : Any = [t.numpy() for t in outputs.encoder_attentions]
a_ : Any = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
a_ : Optional[int] = True
a_ : List[str] = False
a_ : int = False
a_ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE__ )
a_ : Any = model(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
a_ : Any = len(SCREAMING_SNAKE_CASE__ )
self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE__ )
check_encoder_attentions_output(SCREAMING_SNAKE_CASE__ )
if self.is_encoder_decoder:
a_ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = model(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE__ )
check_decoder_attentions_output(SCREAMING_SNAKE_CASE__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
a_ : str = True
a_ : str = model_class(SCREAMING_SNAKE_CASE__ )
a_ : List[str] = model(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE__ )
check_encoder_attentions_output(SCREAMING_SNAKE_CASE__ )
# Check attention is always last and order is fine
a_ : str = True
a_ : Optional[int] = True
a_ : Any = model_class(SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = model(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(model.config.output_hidden_states , SCREAMING_SNAKE_CASE__ )
check_encoder_attentions_output(SCREAMING_SNAKE_CASE__ )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
a_ : Tuple = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
a_ : str = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
a_ : int = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
a_ : Tuple = prepare_led_inputs_dict(model.config , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : Tuple = model(**SCREAMING_SNAKE_CASE__ )[0]
a_ : str = (1, 1_0_2_4, 7_6_8)
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
# change to expected output here
a_ : str = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
a_ : str = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
a_ : List[Any] = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
a_ : Any = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
a_ : List[Any] = prepare_led_inputs_dict(model.config , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : Tuple = model(**SCREAMING_SNAKE_CASE__ )[0]
a_ : int = (1, 1_0_2_4, model.config.vocab_size)
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
# change to expected output here
a_ : str = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-3 , rtol=1E-3 )
| 120
|
def aliquot_sum(input_num: int) -> int:
    """Return the aliquot sum (sum of proper divisors) of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
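Quick checks for the aliquot-sum routine above; a positive integer is perfect exactly when its aliquot sum equals the number itself:

```python
assert aliquot_sum(6) == 6     # 1 + 2 + 3
assert aliquot_sum(28) == 28   # 1 + 2 + 4 + 7 + 14
assert aliquot_sum(12) == 16   # 1 + 2 + 3 + 4 + 6
```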
| 120
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_model_names = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {F'''funnel-transformer/{name}''': 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {F'''funnel-transformer/{name}''': {"""do_lower_case""": True} for name in _model_names}


class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
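A hedged usage sketch for the fast tokenizer above; the checkpoint name comes from the pretrained maps earlier in the file, and the exact token ids depend on the downloaded vocabulary.

```python
# Sketch: encode a sentence pair; Funnel gives the leading <cls> token its own
# token_type_id (2), unlike BERT-style tokenizers.
tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
encoded = tokenizer("Hello world", "How are you?")
print(encoded["input_ids"])
print(encoded["token_type_ids"])  # starts with 2, then 0s, then 1s
```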
| 261
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=50 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = initializer_range
__a = use_labels
__a = scope
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
def a__ ( self ):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = True
__a = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = True
__a = True
__a = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval()
# first forward pass
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , )
__a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([input_mask, next_mask] , dim=-1 )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ):
__a = BertGenerationDecoder(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_snake_case : Any = (BertGenerationDecoder,) if is_torch_available() else ()
_snake_case : Union[str, Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def a__ ( self ):
__a = BertGenerationEncoderTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase )
def a__ ( self ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase )
@slow
def a__ ( self ):
__a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
self.assertIsNotNone(lowerCamelCase )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
__a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
__a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
| 261
| 1
|
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Advance the generator and return the next pseudorandom number."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
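The constants in the demo are the classic Numerical Recipes LCG parameters (multiplier 1664525, increment 1013904223). A bounded variant of the demo with a fixed seed, so the output is reproducible; the seed value here is illustrative:

```python
lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
print([lcg.next_number() for _ in range(3)])  # same three numbers on every run
```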
| 355
|
import math


def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares
    of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
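A worked check for the routine above (Project Euler problem 6): for n = 10 the sum of squares is 385 and the square of the sum is 55² = 3025, so the difference is 2640.

```python
assert solution(10) == 2640
print(solution())  # 25164150 for the default n = 100
```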
| 323
| 0
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix):
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('Please provide a matrix of size 2x2 or 3x3.')
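A quick sanity check of the 2x2 branch; the matrix below has determinant 1, so its inverse is exact:

```python
# inverse of [[2, 5], [1, 3]] is [[3, -5], [-1, 2]]
print(inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]]))
```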
| 137
|
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed')
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open')
issue.remove_from_labels('stale')
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.')
issue.add_to_labels('stale')
if __name__ == "__main__":
main()
| 137
| 1
|
"""simple docstring"""
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Return the maximum profit attainable by greedily taking the items with
    the highest profit/weight ratio first (fractional knapsack)."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
__A = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
__A = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
__A = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
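A worked example for calc_profit: with profits [1, 2, 3], weights [3, 4, 5], and max_weight 15 everything fits (total weight 12), so the greedy gain is the full profit 1 + 2 + 3 = 6.

```python
assert calc_profit([1, 2, 3], [3, 4, 5], 15) == 6
```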
| 358
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
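The module above follows the transformers lazy-import pattern: `_import_structure` maps submodules to their exported names, and `_LazyModule` replaces the package in `sys.modules` so the heavy torch/TF submodules are imported only when an attribute is first accessed. A sketch of what that buys a consumer, assuming the usual package layout:

```python
# Importing a config is cheap; modeling_xlm (and torch) load only on first use.
from transformers import XLMConfig

config = XLMConfig()
```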
| 64
| 0
|
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_A : int = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
def __init__( self , _a , _a , _a = None , _a = Split.train , _a = None , ):
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , A_ , )
lowercase : Dict = args
lowercase : Optional[int] = glue_processors[args.task_name]()
lowercase : Optional[Any] = glue_output_modes[args.task_name]
if isinstance(A_ , A_ ):
try:
lowercase : Optional[Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
lowercase : Any = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
lowercase : Optional[int] = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
lowercase : Tuple = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
if os.path.exists(A_ ) and not args.overwrite_cache:
lowercase : str = time.time()
lowercase : int = torch.load(A_ )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
lowercase : Tuple = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowercase : Tuple = self.processor.get_test_examples(args.data_dir )
else:
lowercase : Optional[int] = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowercase : Tuple = examples[:limit_length]
lowercase : Any = glue_convert_examples_to_features(
A_ , A_ , max_length=args.max_seq_length , label_list=A_ , output_mode=self.output_mode , )
lowercase : Optional[int] = time.time()
torch.save(self.features , A_ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ):
return len(self.features )
def __getitem__( self , _a ):
return self.features[i]
def __magic_name__ ( self ):
return self.label_list
| 202
|
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Advance the generator and return the next pseudorandom number."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
| 103
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 8
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
def __init__( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[int]=1_2 ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Optional[int]=True ,_UpperCamelCase :Union[str, Any]=True ,_UpperCamelCase :Dict=True ,_UpperCamelCase :Optional[int]=9_9 ,_UpperCamelCase :Dict=3_2 ,_UpperCamelCase :Union[str, Any]=3_2 ,_UpperCamelCase :Union[str, Any]=2 ,_UpperCamelCase :Optional[Any]=4 ,_UpperCamelCase :List[Any]=3_7 ,_UpperCamelCase :Tuple=0.1 ,_UpperCamelCase :Optional[int]=0.1 ,_UpperCamelCase :int=5_1_2 ,_UpperCamelCase :Tuple=0.02 ,_UpperCamelCase :Any=0 ,_UpperCamelCase :str=None ,):
snake_case_ : str = parent
snake_case_ : int = batch_size
snake_case_ : Union[str, Any] = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Union[str, Any] = use_input_mask
snake_case_ : List[str] = use_labels
snake_case_ : int = vocab_size
snake_case_ : Any = hidden_size
snake_case_ : List[Any] = projection_dim
snake_case_ : Dict = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : str = intermediate_size
snake_case_ : int = dropout
snake_case_ : int = attention_dropout
snake_case_ : Dict = max_position_embeddings
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : Dict = scope
snake_case_ : Union[str, Any] = bos_token_id
def a__ ( self :Any ):
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case_ : Union[str, Any] = None
if self.use_input_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
def a__ ( self :str ):
return BlipTextConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
def a__ ( self :List[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[int] ):
snake_case_ : List[str] = TFBlipTextModel(config=_UpperCamelCase )
snake_case_ : List[Any] = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,training=_UpperCamelCase )
snake_case_ : Any = model(_UpperCamelCase ,training=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def a__ ( self :List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowercase__ , unittest.TestCase ):
lowercase : Optional[Any] = (TFBlipTextModel,) if is_tf_available() else ()
lowercase : int = False
lowercase : List[Any] = False
lowercase : Dict = False
def a__ ( self :List[Any] ):
snake_case_ : List[str] = BlipTextModelTester(self )
snake_case_ : Tuple = ConfigTester(self ,config_class=_UpperCamelCase ,hidden_size=3_7 )
def a__ ( self :Union[str, Any] ):
self.config_tester.run_common_tests()
def a__ ( self :Union[str, Any] ):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def a__ ( self :Tuple ):
pass
def a__ ( self :Tuple ):
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def a__ ( self :Any ):
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def a__ ( self :Tuple ):
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def a__ ( self :List[Any] ):
pass
@slow
def a__ ( self :Any ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[Any] = TFBlipTextModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def a__ ( self :Dict ,_UpperCamelCase :Tuple=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCamelCase )
| 8
| 1
|
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character
    has an odd frequency."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Same check as above, but with an explicit frequency dictionary."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
            if odd_char > 1:
                return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark the two implementations against each other."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
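
# Quick sanity check (illustrative only; both implementations should agree):
#   can_string_be_rearranged_as_palindrome_counter("Momo")   -> True  (two m's, two o's)
#   can_string_be_rearranged_as_palindrome("Mother")         -> False (six distinct letters)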
| 120
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline

@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)

@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_tiny_stable_diffusion_pipe(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        # Same checkpoint as above, but keeping the default safety checker enabled.
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice_default = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the
        # `sum` over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice_default).max() < 1e-2
| 120
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BERT sequence-generation checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePiece processor is not picklable; it is re-loaded in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string with the SentencePiece model."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
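
# Minimal usage sketch (assumes network access to the public checkpoint referenced
# above; not part of the original module):
#
#   tokenizer = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder"
#   )
#   ids = tokenizer("Hello world").input_ids
#   print(tokenizer.decode(ids))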
| 83
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer

def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")

def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
| 83
| 1
|
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Tracks the peak RSS memory of the current process from a background thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (deltas in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem (deltas in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
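
# Minimal usage sketch (illustrative; `run_inference` is a hypothetical callable):
#
#   start = start_measure()
#   run_inference()
#   measures = end_measure(start)
#   log_measures(measures, "inference")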
| 107
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Pipeline that predicts a caption for a given image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git models set `input_ids = None` in `preprocess` when no prompt is given; in batched mode
        # these become a list of `None`, which `generate` cannot handle, so collapse it back to `None`.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
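
# Minimal usage sketch (illustrative; any image-captioning checkpoint should work,
# the model name and image URL below are examples, not requirements):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="microsoft/git-base-coco")
#   captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")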
| 323
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    """Disable gradient updates for all parameters of the given module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available device, warning about the known MPS issues."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Display an image without axis ticks."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current time as an HH:MM:SS string."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
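
# Minimal usage sketch (illustrative; `torch.nn.Linear` stands in for any model):
#
#   model = torch.nn.Linear(4, 2).to(get_device())
#   freeze_params(model)
#   print(f"[{get_timestamp()}] trainable params:",
#         sum(p.numel() for p in model.parameters() if p.requires_grad))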
| 286
| 0
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batch of differentiable, standard pinhole cameras."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Return per-pixel (column, row) coordinates of shape (width * height, 2)."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Map pixel coordinates to [-1, 1] and scale by the tangent of the half field of view.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Create a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
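
# Minimal usage sketch (illustrative): twenty cameras panning around the origin,
# each yielding one (origin, direction) ray pair per pixel.
#
#   cameras = create_pan_cameras(64)
#   rays = cameras.camera_rays  # shape: (1, 20 * 64 * 64, 2, 3)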
| 73
|
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) tokenizer for CodeGen checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
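
# Minimal usage sketch (illustrative token ids; `truncate_before_pattern` cuts the
# decoded completion at the first match of any of the given regexes):
#
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tokenizer("def hello():").input_ids
#   tokenizer.decode(ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])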
| 64
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}

class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 63
|
import math
class SelfOrganizingMap:
    """A tiny two-cluster self-organizing map (competitive learning)."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning weight vector for a sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Pull the winning weight vector `j` toward the sample."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 63
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 8
|
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """
    Releases memory from `objects` by setting them to `None` and calling `gc.collect()` and the relevant
    `empty_cache()`. Returned objects should be reassigned to the same variables.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception):
    """Checks whether `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    """
    Decorator that retries `function` with a halved batch size whenever an out-of-memory error is raised,
    starting from `starting_batch_size`. `function` must take `batch_size` as its first argument.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
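
# Minimal usage sketch of the decorator (illustrative training function):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run the loop
#
#   train()  # halves the batch size and retries whenever an OOM error is raised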
| 8
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()

logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}

def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")

def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
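# A small illustrative sketch (added here; the layer string below is a typical
# fairseq value, assumed for demonstration) of how conv_feature_layers is unpacked
# above into per-layer dims, kernel sizes, and strides:
def _demo_conv_feature_layers():
    conv_layers = eval("[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2")
    dims = [x[0] for x in conv_layers]     # [512, 512, 512, 512, 512, 512, 512]
    kernels = [x[1] for x in conv_layers]  # [10, 3, 3, 3, 3, 2, 2]
    strides = [x[2] for x in conv_layers]  # [5, 2, 2, 2, 2, 2, 2]
    return dims, kernels, strides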
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """
    Copy/paste/tweak the fairseq SEW checkpoint's weights into the transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
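# A minimal sketch (added for illustration; the sample key and mapping entry are
# assumptions, not taken from the real MAPPING table) of how the "*" wildcard in
# recursively_load_weights is resolved to a concrete layer index:
def _demo_wildcard_resolution():
    name = "encoder.layers.3.self_attn.k_proj.weight"
    key, mapped_key = "self_attn.k_proj", "encoder.layers.*.attention.k_proj"
    layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
    return mapped_key.replace("*", layer_index)  # -> "encoder.layers.3.attention.k_proj"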
| 355
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>",
                 unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
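# A minimal sketch (added for illustration, with placeholder ids 0 for <s> and 2
# for </s>) of the special-token layout the three methods above agree on:
def _demo_special_tokens(token_ids_0, token_ids_1=None, cls_id=0, sep_id=2):
    cls, sep = [cls_id], [sep_id]
    if token_ids_1 is None:
        return cls + token_ids_0 + sep  # _demo_special_tokens([10, 11]) -> [0, 10, 11, 2]
    return cls + token_ids_0 + sep + token_ids_1 + sep  # ([10], [20, 21]) -> [0, 10, 2, 20, 21, 2]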
| 278
| 0
|
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """remove unused keys (e.g.: seg_head.aux_head)"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
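# A small sketch (added for illustration; the sample key is an assumption) showing
# how create_rename_keys rewrites one original MobileViTV2 key into the Hugging
# Face naming scheme used above:
def _demo_rename_key():
    k = "layer_3.0.block.conv.weight"
    k_new = k.replace(".block.", ".").replace(".conv.", ".convolution.")
    k_new = k_new.replace("layer_3.0.", "mobilevitv2.encoder.layer.2.downsampling_layer.")
    return k, k_new  # -> (..., "mobilevitv2.encoder.layer.2.downsampling_layer.convolution.weight")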
| 83
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
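# A minimal sketch (added for illustration) of the property _check_zero_mean_unit_variance
# verifies: subtracting the mean and dividing by the standard deviation yields a
# signal with ~0 mean and ~unit variance.
def _demo_zero_mean_unit_variance():
    x = np.asarray(floats_list((1, 800))[0])
    normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
    return abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3  # True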
| 83
| 1
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> str:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ )
lowercase_ : List[str] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowercase_ : List[Any] = dataset_size < in_memory_max_size
else:
lowercase_ : Optional[int] = False
lowercase_ : Any = is_small_dataset(UpperCAmelCase__ )
assert result == expected
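# A short sketch (added for illustration) of the rule the test above encodes: a
# dataset counts as "small" only when both sizes are known (truthy) and the
# dataset fits under the configured in-memory maximum.
def _demo_is_small(dataset_size, in_memory_max_size):
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False

# _demo_is_small(400 * 2**20, 900 * 2**20) -> True
# _demo_is_small(None, 900 * 2**20) -> False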
| 356
|
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Removes duplicate initializers from the model to reduce its size."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
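# A small sketch (added for illustration) of the byte accounting used above: ONNX
# TensorProto data types 1 (FLOAT) and 6 (INT32) take 4 bytes per element, while
# 7 (INT64) and 11 (DOUBLE) take 8.
def _demo_tensor_bytes(dims, dtype):
    n = numpy.prod(dims)
    return n * (4 if dtype in (1, 6) else 8 if dtype in (7, 11) else 0)

# _demo_tensor_bytes((768, 768), 1) -> 2359296 bytes for a float32 768x768 initializer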
| 21
| 0
|
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """Counts the words in words.txt whose alphabetical value is a triangular number."""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
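# A worked example (added for illustration): "SKY" has word value
# 19 + 11 + 25 = 55, the tenth triangular number, so it is a triangle word.
def _demo_word_value(word: str = "SKY"):
    value = sum(ord(ch) - 64 for ch in word)
    return value, value in TRIANGULAR_NUMBERS  # -> (55, True)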
| 33
|
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """
    Builds and runs a quantum half adder: the sum (XOR) of the inputs is written
    to qubit 2 and the carry (AND) to qubit 3.
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F"Half Adder Output Qubit Counts: {counts}")
| 286
| 0
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")

        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
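# A hedged usage sketch (added; the script file name, model checkpoint, and paths
# below are placeholders, not taken from the original file):
#
#   python run_tabfact.py \
#     --model_name_or_path microsoft/tapex-base \
#     --dataset_name tab_fact --do_train --do_eval \
#     --output_dir ./tapex-tabfact-output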
| 371
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
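# A minimal usage sketch (added for illustration): the defaults mirror a BERT-base
# geometry, and projection_dim=0 means the encoder output is used as-is, while a
# positive value adds a projection layer of that width.
def _demo_dpr_config():
    config = DPRConfig(projection_dim=128)
    return config.hidden_size, config.projection_dim  # -> (768, 128)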
| 224
| 0
|
import math
def is_prime(number: int) -> bool:
    """Checks if a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Returns the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below `ratio`.
    """
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
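# A quick sketch (added for illustration) of why the trial division above only
# tests candidates of the form 6k +/- 1: every prime p > 3 satisfies p % 6 in
# {1, 5}, so no prime in range should fall outside those residues.
def _demo_six_k(limit: int = 50) -> list:
    return [p for p in range(5, limit) if is_prime(p) and p % 6 not in (1, 5)]  # -> []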
| 43
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
    'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
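# A short sketch (added for illustration; the import path is an assumption) of what
# the _LazyModule indirection above buys: importing the package is cheap, and the
# heavy submodules only load on first attribute access.
#
#   import transformers.models.convbert as convbert  # fast; nothing heavy imported yet
#   model_cls = convbert.ConvBertModel               # first access imports modeling_convbert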
| 62
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_ ( ABC ):
    '''simple docstring'''

    @staticmethod
    @abstractmethod
    def register_subcommand( parser: ArgumentParser ):
        """simple docstring"""
        raise NotImplementedError()

    @abstractmethod
    def run( self ):
        """simple docstring"""
        raise NotImplementedError()
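# Hedged illustration (not in the original file) of how a concrete CLI
# command would subclass the abstract base above:
#
# class EnvCommand(UpperCAmelCase_):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         ...  # attach a subparser and set its defaults
#
#     def run(self):
#         ...  # execute the command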
| 365
|
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_lowercase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 229
| 0
|
"""simple docstring"""
def max_product_subarray( numbers: list[int] ) -> int:
    '''simple docstring'''
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError("numbers must be an iterable of integers" )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
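if __name__ == "__main__":
    # Hedged sanity checks (not in the original file): for [2, 3, -2, 4] the
    # best subarray is [2, 3] (product 6); a zero resets the running product.
    assert max_product_subarray([2, 3, -2, 4] ) == 6
    assert max_product_subarray([-2, 0, -1] ) == 0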
| 108
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A ( SchedulerCommonTest ):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            '''num_train_timesteps''': 1000,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        """simple docstring"""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_variance_type( self ):
        """simple docstring"""
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )

    def test_clip_sample( self ):
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_clip_sample_range( self ):
        """simple docstring"""
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )

    def test_prediction_type( self ):
        """simple docstring"""
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices( self ):
        """simple docstring"""
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''fixed_small_log''' )
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5
    def test_variance_learned_range( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''learned_range''' )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance ) - -10.1_712_790 < 1E-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance ) - -5.7_998_052 < 1E-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance ) - -0.0_010_011 < 1E-5
    def test_full_loop( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3
    def test_full_loop_skip_timesteps( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3
    def test_trained_betas( self ):
        """simple docstring"""
        pass

    def test_add_noise_device( self ):
        """simple docstring"""
        pass
| 278
| 0
|
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class a ( TokenizerTesterMixin , unittest.TestCase ):
snake_case_ = BertJapaneseTokenizer
snake_case_ = False
snake_case_ = True
def A_ ( self : Optional[Any] ):
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ):
        input_text = """こんにちは、世界。 \nこんばんは、世界。"""
        output_text = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
        return input_text, output_text

    def get_clean_sequence( self , tokenizer ):
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def A_ ( self : Tuple ):
pass # TODO add if relevant
def A_ ( self : Optional[Any] ):
pass # TODO add if relevant
def A_ ( self : Dict ):
pass # TODO add if relevant
def A_ ( self : List[str] ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
        self.assertListEqual(tokens , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def A_ ( self : int ):
snake_case_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(_UpperCamelCase )
snake_case_ = """こんにちは、世界。\nこんばんは、世界。"""
snake_case_ = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
snake_case_ = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_UpperCamelCase , '''wb''' ) as handle:
pickle.dump(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , '''rb''' ) as handle:
snake_case_ = pickle.load(_UpperCamelCase )
snake_case_ = tokenizer_new.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def A_ ( self : int ):
snake_case_ = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def A_ ( self : Optional[Any] ):
try:
snake_case_ = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def A_ ( self : Union[str, Any] ):
try:
snake_case_ = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def A_ ( self : Optional[int] ):
snake_case_ = MecabTokenizer(do_lower_case=_UpperCamelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def A_ ( self : List[str] ):
try:
snake_case_ = MecabTokenizer(
do_lower_case=_UpperCamelCase , normalize_text=_UpperCamelCase , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def A_ ( self : int ):
snake_case_ = MecabTokenizer(normalize_text=_UpperCamelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def A_ ( self : Union[str, Any] ):
snake_case_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(_UpperCamelCase )
snake_case_ = """こんにちは、世界。\nこんばんは、世界。"""
snake_case_ = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
snake_case_ = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_UpperCamelCase , '''wb''' ) as handle:
pickle.dump(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , '''rb''' ) as handle:
snake_case_ = pickle.load(_UpperCamelCase )
snake_case_ = tokenizer_new.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@require_sudachi
def A_ ( self : str ):
snake_case_ = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def A_ ( self : Dict ):
snake_case_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def A_ ( self : List[Any] ):
snake_case_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def A_ ( self : List[str] ):
snake_case_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def A_ ( self : str ):
snake_case_ = SudachiTokenizer(do_lower_case=_UpperCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def A_ ( self : Any ):
snake_case_ = SudachiTokenizer(normalize_text=_UpperCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def A_ ( self : List[Any] ):
snake_case_ = SudachiTokenizer(trim_whitespace=_UpperCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def A_ ( self : Tuple ):
snake_case_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(_UpperCamelCase )
snake_case_ = """こんにちは、世界。\nこんばんは、世界。"""
snake_case_ = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
snake_case_ = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_UpperCamelCase , '''wb''' ) as handle:
pickle.dump(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , '''rb''' ) as handle:
snake_case_ = pickle.load(_UpperCamelCase )
snake_case_ = tokenizer_new.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@require_jumanpp
def A_ ( self : Any ):
snake_case_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def A_ ( self : Optional[Any] ):
snake_case_ = JumanppTokenizer(do_lower_case=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def A_ ( self : List[Any] ):
snake_case_ = JumanppTokenizer(normalize_text=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def A_ ( self : Tuple ):
snake_case_ = JumanppTokenizer(trim_whitespace=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def A_ ( self : Any ):
snake_case_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def A_ ( self : Dict ):
snake_case_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
snake_case_ = {}
for i, token in enumerate(_UpperCamelCase ):
snake_case_ = i
snake_case_ = WordpieceTokenizer(vocab=_UpperCamelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def A_ ( self : Any ):
snake_case_ = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
snake_case_ = tokenizer.subword_tokenizer
snake_case_ = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(_UpperCamelCase , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
snake_case_ = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(_UpperCamelCase , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def A_ ( self : Optional[int] ):
snake_case_ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
snake_case_ = tokenizer.encode('''ありがとう。''' , add_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class a ( TokenizerTesterMixin , unittest.TestCase ):
snake_case_ = BertJapaneseTokenizer
snake_case_ = False
def A_ ( self : List[Any] ):
super().setUp()
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        input_text = """こんにちは、世界。 \nこんばんは、世界。"""
        output_text = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
        return input_text, output_text
def A_ ( self : Optional[int] ):
pass # TODO add if relevant
def A_ ( self : Union[str, Any] ):
pass # TODO add if relevant
def A_ ( self : Optional[Any] ):
pass # TODO add if relevant
def A_ ( self : Optional[Any] ):
snake_case_ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
snake_case_ = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
_UpperCamelCase , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def A_ ( self : List[str] ):
snake_case_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
snake_case_ = {}
for i, token in enumerate(_UpperCamelCase ):
snake_case_ = i
snake_case_ = CharacterTokenizer(vocab=_UpperCamelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def A_ ( self : Any ):
snake_case_ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
snake_case_ = tokenizer.encode('''ありがとう。''' , add_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class a ( unittest.TestCase ):
def A_ ( self : Dict ):
snake_case_ = """cl-tohoku/bert-base-japanese"""
snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
class a ( unittest.TestCase ):
def A_ ( self : Any ):
snake_case_ = """cl-tohoku/bert-base-japanese"""
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(_UpperCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
snake_case_ = """bert-base-cased"""
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(_UpperCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
| 358
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : Optional[Any] = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class a ( PretrainedConfig ):
    model_type = "xlm-roberta-xl"
    def __init__( self , vocab_size=25_0880 , hidden_size=2560 , num_hidden_layers=36 , num_attention_heads=32 , intermediate_size=1_0240 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class a ( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 72
| 0
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class lowerCamelCase (TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
'''simple docstring'''
    def __init__( self , features=None , device=None , **jnp_array_kwargs ) -> None:
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device , Device ):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` "
                'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
                'the device with `str()` to get its string identifier that will be internally mapped '
                'to the actual `jaxlib.xla_extension.Device`.' )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
                f"device: {str(jax.devices()[0] )}." )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
@staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device ): device for device in jax.devices()}

    def _consolidate( self , column ):
        import jax
        import jax.numpy as jnp

        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
    def _tensorize( self , value ):
        import jax
        import jax.numpy as jnp

        if isinstance(value , (str, bytes, type(None)) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'dtype': jnp.int64}
            else:
                default_dtype = {'dtype': jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'dtype': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
    def _recursive_tensorize( self , data_struct ):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , '__array__' ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )

    def recursive_tensorize( self , data_struct ):
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )

    def format_row( self , pa_table ) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )

    def format_column( self , pa_table ) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column

    def format_batch( self , pa_table ) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
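# Hedged usage sketch (assumes the public `datasets` API; not part of this
# file): the formatter above is what backs `.with_format("jax")`.
#
# from datasets import load_dataset
# ds = load_dataset("rotten_tomatoes", split="train").with_format("jax")
# ds[0]  # -> a mapping whose tensor fields are jax.Array values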
| 29
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCamelCase( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
@require_torch
def UpperCamelCase ( self) -> int:
"""simple docstring"""
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
| 21
| 0
|
'''simple docstring'''
import torch
from torch import nn
class a ( nn.Module ):
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ) -> None:
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
    def _compute_logit( self , hidden , weight , bias , proj ):
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias
        return logit
    def forward( self , hidden , labels=None , keep_order=False ):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -1_00
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out
    def log_prob( self , hidden ):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
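# Hedged usage sketch (shapes and cutoffs are illustrative, not from the
# original file), using the parameter names restored above:
#
# softmax = a(n_token=10000, d_embed=512, d_proj=512, cutoffs=[2000, 6000], div_val=1)
# hidden = torch.randn(4, 16, 512)
# labels = torch.randint(0, 10000, (4, 16))
# nll = softmax(hidden, labels)  # per-token negative log-likelihood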
| 104
|
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int ) -> int:
    """Index of the parent of the heap node at ``position``."""
    return (position - 1) // 2


def get_child_left_position(position: int ) -> int:
    """Index of the left child of the heap node at ``position``."""
    return (2 * position) + 1


def get_child_right_position(position: int ) -> int:
    """Index of the right child of the heap node at ``position``."""
    return (2 * position) + 2
class MinPriorityQueue ( Generic[T] ):
    def __init__( self ) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__( self ) -> int:
        return self.elements

    def __repr__( self ) -> str:
        return str(self.heap )

    def is_empty( self ) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push( self , elem: T , weight: int ) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight) )
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem )

    def extract_min( self ) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem )
        return elem

    def update_key( self , elem: T , weight: int ) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position )
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem )
            else:
                self._bubble_down(elem )
        else:
            self._bubble_down(elem )

    def _bubble_up( self , elem: T ) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos )
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position , curr_pos )
            return self._bubble_up(elem )
        return None

    def _bubble_down( self , elem: T ) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos )
        child_right_position = get_child_right_position(curr_pos )
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position , curr_pos )
                return self._bubble_down(elem )
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        return None

    def _swap_nodes( self , node1_pos: int , node2_pos: int ) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted ( Generic[T] ):
    def __init__( self ) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__( self ) -> str:
        return str(self.connections )

    def __len__( self ) -> int:
        return self.nodes

    def add_node( self , node: T ) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge( self , node1: T , node2: T , weight: int ) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(graph: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
    '''simple docstring'''
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node , weight )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour , dist[neighbour] )
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour , dist[neighbour] )
                parent[neighbour] = node
    return dist, parent
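if __name__ == "__main__":
    # Hedged usage sketch (not in the original file), using the names restored
    # above: a 4-node cycle whose MST drops the heaviest edge.
    graph = GraphUndirectedWeighted[int]()
    graph.add_edge(1 , 2 , 1 )
    graph.add_edge(2 , 3 , 2 )
    graph.add_edge(3 , 4 , 1 )
    graph.add_edge(4 , 1 , 5 )
    dist, parent = prims_algo(graph )
    print(dist , parent )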
| 104
| 1
|
"""simple docstring"""
from math import log2


def get_index_of_rightmost_set_bit( number: int ) -> int:
    if not isinstance(number , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    elif number < 0:
        raise ValueError('''Input value must be a positive integer''' )
    return 0 if (number == 0) else int(log2(number & -number ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
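    # Hedged example (name as fixed above): 36 = 0b100100, so the lowest set
    # bit sits at index 2.
    print(get_index_of_rightmost_set_bit(36 ) )  # -> 2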
| 16
|
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results( output_dir: str ) -> dict:
    """simple docstring"""
    results = {}
    path = os.path.join(output_dir , 'all_results.json' )
    if os.path.exists(path ):
        with open(path , 'r' ) as f:
            results = json.load(f )
    else:
        raise ValueError(f"can't find {path}" )
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCamelCase__ ( TestCasePlus ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : Any ):
import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
        with patch.object(sys , 'argv' , testargs ):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start , 5_0_0 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
import xla_spawn
        testargs = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
        with patch.object(sys , 'argv' , testargs ):
            xla_spawn.main()
| 224
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mctct'] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 61
|
'''simple docstring'''
def nor_gate( input_1: int , input_2: int ) -> int:
    """Return 1 only when both inputs are 0 (logical NOR)."""
    return int(input_1 == input_2 == 0 )


def main() -> None:
    """Print the truth table of the NOR gate."""
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 61
| 1
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
    get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bnb_available,
    is_boto3_available,
    is_ccl_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fp8_available,
    is_ipex_available,
    is_megatron_lm_available,
    is_mlflow_available,
    is_mps_available,
    is_npu_available,
    is_rich_available,
    is_safetensors_available,
    is_sagemaker_available,
    is_tensorboard_available,
    is_tpu_available,
    is_transformers_available,
    is_wandb_available,
    is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """
    Simulate the BB84 quantum key distribution protocol and return the
    generated key as a string of 0s and 1s.
    """
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
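

# Minimal sketch of the sifting step in isolation (an addition, not part of the
# original file; the arrays below are made-up examples, not circuit output).
# A measured bit survives only at positions where Alice's and Bob's bases agree.
if __name__ == "__main__":
    alice_basis_demo = [0, 1, 1, 0, 1]
    bob_basis_demo = [0, 0, 1, 0, 1]
    measured_bits = "10110"
    sifted = "".join(
        bit
        for a_basis, b_basis, bit in zip(alice_basis_demo, bob_basis_demo, measured_bits)
        if a_basis == b_basis
    )
    assert sifted == "1110"  # the bases agree at positions 0, 2, 3 and 4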
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)

        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """
    Denoising diffusion probabilistic models (DDPM), a Flax port of the scheduler
    from https://arxiv.org/abs/2006.11239.
    """

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
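

# Illustrative usage sketch (an addition, not part of the original file): drive
# the scheduler with a dummy "denoiser" that always predicts zero noise. A real
# pipeline would call a trained Flax UNet instead of `dummy_model`.
if __name__ == "__main__":
    scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=10)

    rng = jax.random.PRNGKey(0)
    sample = jax.random.normal(rng, (1, 3, 8, 8), dtype=jnp.float32)

    def dummy_model(sample, t):
        # stand-in for a trained denoiser; predicts zero noise everywhere
        return jnp.zeros_like(sample)

    for t in state.timesteps:
        rng, step_rng = jax.random.split(rng)
        model_output = dummy_model(sample, t)
        sample, state = scheduler.step(state, model_output, t, sample, key=step_rng, return_dict=False)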
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stub so the module can be imported without PIL installed."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()

            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
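

# Illustrative usage (an addition; the model name and sizes are arbitrary
# examples): the deprecated `no_cuda` flag is translated by __init__ above into
# the positive `cuda=False` attribute before the dataclass is populated.
if __name__ == "__main__":
    benchmark_args = PyTorchBenchmarkArguments(
        models=["distilbert-base-uncased"],
        batch_sizes=[1],
        sequence_lengths=[8],
        no_cuda=True,
    )
    print(benchmark_args.device, benchmark_args.n_gpu)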
import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    """Test cases for the greedy knapsack implementation."""

    def test_sorted(self):
        # kp.calc_profit takes (profit, weight, max_weight) and returns the answer
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # Returns ValueError for any negative max_weight value
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        # Returns ValueError for any negative weight value
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        # Returns ValueError for any negative profit value
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        # Returns ValueError for a zero max_weight value
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        # Raised if the profit and weight lists have different lengths
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )


if __name__ == "__main__":
    unittest.main()
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """A small test to make sure that inference works in half precision without any problem."""
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be multiplied together
    before a single-digit number remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps

def additive_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be summed before a
    single-digit number remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps

if __name__ == "__main__":
import doctest
doctest.testmod()
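
    # A quick demo of the two helpers: 39 -> 27 -> 14 -> 4 takes three
    # multiplicative steps, and 199 -> 19 -> 10 -> 1 takes three additive steps.
    print(multiplicative_persistence(39))  # 3
    print(additive_persistence(199))  # 3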
| 350
|
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image.Image) -> Image.Image:
    """Binarize a grayscale PIL image around its mean pixel value (in place)."""
    width, height = image.size
    mean = 0
    pixels = image.load()
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(height):
        for i in range(width):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
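
    # Synthetic sanity check, independent of the file paths above: a 2x2
    # grayscale image with pixel values [0, 50, 200, 250] has mean 125, so the
    # thresholded output should be [0, 0, 255, 255].
    demo = Image.new("L", (2, 2))
    demo.putdata([0, 50, 200, 250])
    assert list(mean_threshold(demo).getdata()) == [0, 0, 255, 255]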
| 243
| 0
|
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
"n_samples": 64,
"horizon": 32,
"num_inference_steps": 20,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, env_step_info = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 339
|
from __future__ import annotations
def all_unique(nums: list[int]) -> bool:
    """
    Return True when every element of `nums` is distinct.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 1])
    False
    """
    return len(set(nums)) == len(nums)

if __name__ == "__main__":
import doctest
doctest.testmod()
| 339
| 1
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
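
# A hedged usage sketch (not part of the original module): exporting a GPT-J
# checkpoint to ONNX with the config above via the legacy `transformers.onnx` API.
#
#     from pathlib import Path
#     from transformers import AutoTokenizer, GPTJModel
#     from transformers.onnx import export
#
#     model_name = "EleutherAI/gpt-j-6B"  # any GPT-J checkpoint
#     tokenizer = AutoTokenizer.from_pretrained(model_name)
#     model = GPTJModel.from_pretrained(model_name)
#     onnx_config = GPTJOnnxConfig(model.config, task="default")
#     export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("gptj.onnx"))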
| 362
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
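
    # Hedged sanity check: each Chudnovsky iteration contributes roughly 14
    # digits, so even a small precision reproduces the familiar expansion.
    assert pi(10).startswith("3.1415926")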
| 88
| 0
|
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check that no already-colored neighbour uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Backtracking helper: try to color the vertices from `index` onwards."""
    # Base case: every vertex has been colored
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a proper coloring using at most `max_colors` colors, or []."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
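

if __name__ == "__main__":
    # A small demo of the backtracking colorer above: a 4-cycle is
    # 2-colorable, while a triangle needs at least 3 colors.
    four_cycle = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    triangle = [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    print(color(four_cycle, 2))  # e.g. [0, 1, 0, 1]
    print(color(triangle, 2))    # [] -- not 2-colorable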
| 310
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 310
| 1
|
"""simple docstring"""
def counting_sort(collection: list[int]) -> list[int]:
    """Pure Python implementation of counting sort."""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string: str) -> str:
    """Sort the characters of `string` using counting sort."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])

if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
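
    # Counting sort runs in O(n + k) time, where k is the value range, and is
    # stable -- which is what makes the character-level wrapper above work.
    assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]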
| 363
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 266
| 0
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 76
|
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__lowerCamelCase : List[Any] = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
    pages = {401--415},
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__lowerCamelCase : Optional[int] = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
__lowerCamelCase : str = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="exp" , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> List[str]:
UpperCamelCase : Optional[Any] = len(references[0] )
if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
UpperCamelCase : Optional[int] = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )]
UpperCamelCase : Tuple = sacrebleu.corpus_bleu(
_lowerCAmelCase , _lowerCAmelCase , smooth_method=_lowerCAmelCase , smooth_value=_lowerCAmelCase , force=_lowerCAmelCase , lowercase=_lowerCAmelCase , use_effective_order=_lowerCAmelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
] , reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 52
| 0
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 368
|
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Strip the Pegasus newline marker and rejoin `x` with one sentence per line."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 225
| 0
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
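
# A minimal sketch of what `find_executable_batch_size` buys us (hypothetical,
# standalone demo, separate from the script above): the decorated function is
# re-run with a halved batch size each time it raises an out-of-memory style
# error, so the first surviving call "finds" a workable batch size.
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def probe(batch_size):
#         if batch_size > 16:  # pretend anything above 16 triggers an OOM
#             raise RuntimeError("CUDA out of memory.")
#         return batch_size
#
#     print(probe())  # tries 128 -> 64 -> 32 -> 16 and prints 16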
| 16
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 243
| 0
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(csv_file) in record.message
        for record in caplog.records )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 285
|
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
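# The line above is a quine: the lambda receives its own source text as `quine`,
# and %-formatting re-creates it (%r reproduces the string with quotes, %% is a
# literal percent sign), so the program prints its own source code.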
| 285
| 1
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """Emit a deprecation warning and, when `take_from` is given, pop the
    deprecated value out of it."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}")

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
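

# A hedged usage sketch (the function and argument names below are
# illustrative, not part of this module):
#
#     def resize(image, **kwargs):
#         # warns and pops the deprecated value out of **kwargs
#         old_size = deprecate("old_size", "0.99.0", "Use `size` instead.", take_from=kwargs)
#         ...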
| 32
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 88
| 0
|
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename flax keys to the PyTorch naming scheme, transposing where needed."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx + 1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
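# For reference, the index built above follows the standard sharded-checkpoint
# layout used by `transformers`; the values below are illustrative only:
#   {
#       "metadata": {"total_size": 26953662464},
#       "weight_map": {
#           "shared.weight": "pytorch_model-00001-of-00072.bin",
#           "encoder.block.0.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00072.bin",
#           ...
#       }
#   }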
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 153
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__snake_case = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 153
| 1
|
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 28
|
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__('doctest').testmod()
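# Hedged usage sketch for the helpers above:
#   to_pascal_case("this is a sentence")        # -> "ThisIsASentence"
#   to_camel_case("this is a sentence")         # -> "thisIsASentence"
#   to_snake_case("this is a sentence", False)  # -> "this_is_a_sentence"
#   to_kebab_case("this is a sentence", True)   # -> "THIS-IS-A-SENTENCE"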
| 266
| 0
|
'''simple docstring'''
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
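# Quick sanity check: 7 * 15 == 105 == 4 * 26 + 1, so
#   find_mod_inverse(7, 26)  # -> 15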
| 358
|
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = 0
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_A ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_A ) , 0 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(_A )
self.assertIsInstance(_A , _A )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , config=_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) )
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' , use_fast=_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) )
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' , use_fast=_A )
self.assertIsInstance(_A , _A )
@require_tokenizers
def lowercase_ ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) )
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' )
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
with pytest.raises(_A ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase_ ( self : int ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase__ : Optional[int] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
if isinstance(_A , _A ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _A )
else:
self.assertEqual(tokenizer.do_lower_case , _A )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase_ ( self : List[str] ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_A , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
UpperCAmelCase__ : Dict = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TOKENIZER_MAPPING.values()
UpperCAmelCase__ : Any = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_A )
@require_tokenizers
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_A ) , _A )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _A )
@require_tokenizers
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_A )
UpperCAmelCase__ : Any = '''Hello, world. How are you?'''
UpperCAmelCase__ : Dict = tokenizer.tokenize(_A )
self.assertEqual('''[UNK]''' , tokens[0] )
UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_A )
UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(_A )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_A ) , _A )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30_000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = get_tokenizer_config('''bert-base-cased''' )
UpperCAmelCase__ : Optional[int] = config.pop('''_commit_hash''' , _A )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_A , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase__ : Tuple = get_tokenizer_config(_A )
self.assertDictEqual(_A , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = get_tokenizer_config(_A )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
UpperCAmelCase__ : Optional[int] = CustomTokenizer.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self : Any ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , _A )
# Can register in two steps
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_A , slow_tokenizer_class=_A , fast_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
            # We pass through a BERT fast tokenizer because there is no slow-to-fast
            # converter for our new tokenizer and that model does not have a tokenizer.json.
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : Any = BertTokenizerFast.from_pretrained(_A )
bert_tokenizer.save_pretrained(_A )
UpperCAmelCase__ : Optional[int] = CustomTokenizerFast.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(_A , use_fast=_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaises(_A ):
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A , use_fast=_A )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self : int ):
'''simple docstring'''
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = False
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = NewTokenizer
lowerCAmelCase__ = False
try:
AutoConfig.register('''custom''' , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
# If remote code is not set, the default is to use local
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A , use_fast=_A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , revision='''aaaaaa''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 299
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


# Reconstructed as a MobileNetV2-style image processor; the defaults below
# (shortest_edge 256, 224x224 center crop, ImageNet-standard normalization)
# match that family of processors.
class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 55
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 225
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 229
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
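# Hedged usage sketch (pairing this config with BertGenerationEncoder is an
# assumption about the companion model class):
#   from transformers import BertGenerationConfig, BertGenerationEncoder
#   config = BertGenerationConfig(hidden_size=512, num_hidden_layers=4, num_attention_heads=8)
#   model = BertGenerationEncoder(config)  # randomly initialized encoder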
| 229
| 1
|
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue with a fixed capacity, backed by a doubly linked list."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
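# Hedged usage sketch for the fixed-capacity circular queue above:
#   queue = CircularQueueLinkedList(initial_capacity=3)
#   queue.enqueue("a")
#   queue.enqueue("b")
#   queue.first()    # -> "a"
#   queue.dequeue()  # -> "a"; the emptied node is reused by later enqueues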
| 285
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 285
| 1
|
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption for every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
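# Example (hedged): "KHOOR" is "HELLO" shifted by 3, so decrypt("KHOOR")
# prints one candidate per key and the line for key #3 reads "HELLO".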
| 178
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 178
| 1
|
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 153
|
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(f'''{solution() = }''')
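# Hedged sanity checks: the even Fibonacci numbers up to 10 are 2 and 8,
# and up to 34 they are 2, 8 and 34:
#   solution(10)  # -> 10
#   solution(34)  # -> 44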
| 153
| 1
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
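# Sketch of the qkv split performed above (shapes hypothetical): the fused
# projection stacks query, key and value along dim 0, so a (3 * dim, dim)
# weight is cut into three (dim, dim) blocks:
#   qkv = torch.rand(3 * 768, 768)
#   q, k, v = qkv[:768, :], qkv[768 : 2 * 768, :], qkv[-768:, :]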
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )
    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 220
|
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp the image with the affine transform that maps pt1 onto pt2."""
    rotation_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))
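# Hedged example: cv2 solves the 2x3 affine matrix from three point
# correspondences, then warps the whole image with it:
#   pts_src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
#   pts_dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
#   rotated = get_rotation(gray_img, pts_src, pts_dst, img_rows, img_cols)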
if __name__ == "__main__":
    # read original image
    image = cv2.imread(str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg"))
    # turn image into gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 220
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 148
|
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of the value, or its derivative if deriv is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the layer value found after training a single weight."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
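# Hedged behavior note: with enough propagations the output converges near
# the expected value, e.g. forward_propagation(32, 450_000) typically lands
# between 31 and 33.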
| 299
| 0
|
'''simple docstring'''
def solution(power: int = 1_000) -> int:
    """Return the sum of the digits of 2 ** power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
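# Hedged check: 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26, so
#   solution(15)  # -> 26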
| 355
|
'''simple docstring'''
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to its representation in the given base (2-36)."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
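# Hedged examples (traced through the loop above):
#   decimal_to_any(255, 16)  # -> "FF"
#   decimal_to_any(9, 2)     # -> "1001"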
| 242
| 0
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : int = 32 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_55 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : Dict=30 , SCREAMING_SNAKE_CASE__ : Dict=4_00 , SCREAMING_SNAKE_CASE__ : int=3 , ) -> List[str]:
__lowerCAmelCase = parent
__lowerCAmelCase = do_resize
__lowerCAmelCase = size if size is not None else {"""shortest_edge""": 2_88}
__lowerCAmelCase = size_divisor
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_normalize
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = image_mean
__lowerCAmelCase = image_std
__lowerCAmelCase = do_pad
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
def a ( self : Optional[Any] ) -> Union[str, Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width the image processor is expected to return:
        the shorter edge is scaled to size["shortest_edge"], the longer edge is
        capped at 1333/800 * size, and both sides round down to a multiple of
        size_divisor."""
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
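    # Worked example (added, hedged): with size {"shortest_edge": 288} and
    # size_divisor 32, a 600x400 (w x h) image scales to (288, 432) so the short
    # side hits 288, then both sides round down to multiples of 32 -> (288, 416).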
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 229
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
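# Hedged usage sketch (added for illustration): attribute_map lets the generic
# names used across Transformers resolve to CTRL's own attribute names.
# >>> config = CTRLConfig(n_embd=512)
# >>> config.hidden_size
# 512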
| 229
| 1
|
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    """Scrape the IMDb Top 250 chart and return a mapping of title -> rating."""
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
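# Hedged usage sketch (added; this hits the live IMDb site, so results vary and
# the scrape can break if the page layout changes):
# >>> movies = get_imdb_top_250_movies()
# >>> len(movies) <= 250
# True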
| 367
|
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
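# Hedged illustration (added): the helper takes the best score over all acceptable
# answers, so with exact_match_score a prediction counts as correct if it matches
# any ground truth after normalization.
# >>> metric_max_over_ground_truths(exact_match_score, "paris", ["Paris", "paris"])  # expected truthy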
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string."
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
    main(args)
| 16
| 0
|
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
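# Design note (added): both conversions map checkpoint keys purely by position via
# dict(zip(...)), so they silently assume the source and target state dicts contain
# the same number of tensors in the same order; comparing the two printed lengths
# above is the only sanity check the script performs.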
| 178
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
])
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
])
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
])
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
])
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
])
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
])
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
])
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
])
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1_372E01, -1.2_787E01, -1.3_477E01],
[-1.2_536E01, -1.4_194E01, -1.4_409E01],
[-1.3_217E01, -1.4_888E01, -1.5_327E01],
],
[
[-1.4_791E01, -1.7_122E01, -1.8_277E01],
[-1.7_163E01, -1.9_192E01, -1.9_533E01],
[-1.7_897E01, -1.9_991E01, -2.0_315E01],
],
[
[7.6_723E-01, 4.1_921E-01, -7.7_878E-02],
[4.7_772E-01, 9.5_557E-03, -2.8_082E-01],
[3.6_032E-01, -2.4_826E-01, -5.1_168E-01],
],
])
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
])
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
])
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
])
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
])
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
])
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
])
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1E-2)
# finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 178
| 1
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_splitting():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 363
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
_lowerCamelCase : Any = {f"""funnel-transformer/{name}""": 5_12 for name in _model_names}
_lowerCamelCase : Optional[Any] = {f"""funnel-transformer/{name}""": {'''do_lower_case''': True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 206
| 0
|
"""simple docstring"""
def kth_permutation(k, n):
    """Return the k-th (0-indexed) lexicographic permutation of range(n),
    using the factorial number system."""
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
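# Hedged usage sketch (added for illustration): the 0-indexed 4th lexicographic
# permutation of range(3).
# >>> kth_permutation(4, 3)
# [2, 0, 1]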
if __name__ == "__main__":
import doctest
doctest.testmod()
| 220
|
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers')

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
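# Typical invocation (added, hedged): with this module wired up as the
# `diffusers-cli` entry point, `diffusers-cli env` dispatches to the
# EnvironmentCommand registered above and prints environment information.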
if __name__ == "__main__":
main()
| 220
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
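# Hedged usage sketch (added; values follow directly from the defaults above):
# >>> config = PoolFormerConfig()
# >>> config.hidden_sizes
# [64, 128, 320, 512]
# >>> PoolFormerOnnxConfig(config).atol_for_validation
# 0.002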
| 357
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_2 = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_2)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 175
| 0
|
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors combined in parallel."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors combined in series."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
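# Hedged usage sketch (added for illustration): two 10-ohm resistors give 5 ohms
# in parallel and 20 ohms in series.
# >>> resistor_parallel([10, 10])
# 5.0
# >>> resistor_series([10, 10])
# 20.0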
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65
|
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class _lowerCamelCase :
def __init__( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : int ) -> str:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = question_encoder
lowerCAmelCase__ : Optional[int] = generator
lowerCAmelCase__ : Optional[int] = self.question_encoder
def _lowerCAmelCase ( self : Dict , UpperCamelCase : Optional[Any] ) -> str:
"""simple docstring"""
if os.path.isfile(UpperCamelCase ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
lowerCAmelCase__ : Dict = os.path.join(UpperCamelCase , """question_encoder_tokenizer""" )
lowerCAmelCase__ : List[Any] = os.path.join(UpperCamelCase , """generator_tokenizer""" )
self.question_encoder.save_pretrained(UpperCamelCase )
self.generator.save_pretrained(UpperCamelCase )
@classmethod
def _lowerCAmelCase ( cls : Union[str, Any] , UpperCamelCase : List[str] , **UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
lowerCAmelCase__ : Dict = kwargs.pop("""config""" , UpperCamelCase )
if config is None:
lowerCAmelCase__ : int = RagConfig.from_pretrained(UpperCamelCase )
lowerCAmelCase__ : List[str] = AutoTokenizer.from_pretrained(
UpperCamelCase , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
lowerCAmelCase__ : List[str] = AutoTokenizer.from_pretrained(
UpperCamelCase , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=UpperCamelCase , generator=UpperCamelCase )
def __call__( self : Dict , *UpperCamelCase : List[Any] , **UpperCamelCase : Union[str, Any] ) -> int:
"""simple docstring"""
return self.current_tokenizer(*UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : Dict , *UpperCamelCase : Tuple , **UpperCamelCase : Optional[int] ) -> Dict:
"""simple docstring"""
return self.generator.batch_decode(*UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[Any] ) -> str:
"""simple docstring"""
return self.generator.decode(*UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.question_encoder
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.generator
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : List[str] , UpperCamelCase : Optional[List[str]] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : str = "longest" , UpperCamelCase : str = None , UpperCamelCase : bool = True , **UpperCamelCase : Union[str, Any] , ) -> BatchEncoding:
"""simple docstring"""
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , UpperCamelCase , )
if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs["""labels"""] = labels["""input_ids"""]
        return model_inputs
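# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file; assumes `transformers`
# is installed and the "facebook/rag-token-nq" checkpoint is reachable):
#
#     from transformers import RagTokenizer
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#     tokenizer.save_pretrained("./rag_tokenizer")  # writes the two sub-tokenizers
# ---------------------------------------------------------------------------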
| 242
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class _A ( LayoutLMvaImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use LayoutLMv2ImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 365
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class _A ( TaskTemplate ):
    task : str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema : ClassVar[Features] = Features({"image": Image()} )
    label_schema : ClassVar[Features] = Features({"labels": ClassLabel} )
    image_column : str = "image"
    label_column : str = "labels"
    def align_with_features( self , features ):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
    def column_mapping( self ) -> Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
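# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file; assumes the `datasets`
# library and the task template `_A` defined above):
#
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = _A()
#     aligned = task.align_with_features(features)
#     print(aligned.column_mapping)  # {'image': 'image', 'labels': 'labels'}
# ---------------------------------------------------------------------------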
| 16
| 0
|
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field (default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class __UpperCamelCase :
    models : List[str] = list_field(
        default=[] , metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        } , )
    batch_sizes : List[int] = list_field(
        default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
    sequence_lengths : List[int] = list_field(
        default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
    inference : bool = field(
        default=True , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
    cuda : bool = field(
        default=True , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
    tpu : bool = field(
        default=True , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
    fpaa : bool = field(default=False , metadata={"help": "Use FP16 to accelerate inference."} )
    training : bool = field(default=False , metadata={"help": "Benchmark training of model"} )
    verbose : bool = field(default=False , metadata={"help": "Verbose memory tracing"} )
    speed : bool = field(
        default=True , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
    memory : bool = field(
        default=True , metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        } , )
    trace_memory_line_by_line : bool = field(default=False , metadata={"help": "Trace memory line by line"} )
    save_to_csv : bool = field(default=False , metadata={"help": "Save result to a CSV file"} )
    log_print : bool = field(default=False , metadata={"help": "Save all print statements in a log file"} )
    env_print : bool = field(default=False , metadata={"help": "Whether to print environment information"} )
    multi_process : bool = field(
        default=True , metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        } , )
    inference_time_csv_file : str = field(
        default=f"""inference_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv."} , )
    inference_memory_csv_file : str = field(
        default=f"""inference_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv."} , )
    train_time_csv_file : str = field(
        default=f"""train_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
    train_memory_csv_file : str = field(
        default=f"""train_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
    env_info_csv_file : str = field(
        default=f"""env_info_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving environment information."} , )
    log_filename : str = field(
        default=f"""log_{round(time() )}.csv""" , metadata={"help": "Log filename used if print statements are saved in log."} , )
    repeat : int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
    only_pretrain_model : bool = field(
        default=False , metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        warnings.warn(
            f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            ' are deprecated in general and it is advised to use external Benchmarking libraries '
            ' to benchmark Transformer models.' , FutureWarning , )
    def to_json_string( self ):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
    def model_names( self ) -> List[str]:
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
' bert-base-cased` or `args.models = [\'bert-base-cased\'].' )
return self.models
@property
    def do_multi_processing( self ):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('Multiprocessing is currently not possible on TPU.' )
return False
else:
return True
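# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file; the class is deprecated,
# as its __post_init__ warning says, so this is illustrative only):
#
#     args = __UpperCamelCase(models=["bert-base-cased"], batch_sizes=[8], sequence_lengths=[8])
#     print(args.model_names)      # ["bert-base-cased"]
#     print(args.to_json_string())
# ---------------------------------------------------------------------------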
| 27
|
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __A ( Pipeline ):
'''simple docstring'''
def __init__( self : List[str] ,**_snake_case : Dict ) -> List[Any]:
"""simple docstring"""
super().__init__(**_snake_case )
requires_backends(self ,'''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self ,images : Union[str, List[str], "Image", List["Image"]] ,**kwargs ) -> Optional[Any]:
        """simple docstring"""
        return super().__call__(images ,**kwargs )
    def _sanitize_parameters( self ,**kwargs ):
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}
    def preprocess( self ,image ,candidate_labels=None ,hypothesis_template="This is a photo of {}." ):
        """simple docstring"""
        image = load_image(image )
        inputs = self.image_processor(images=[image] ,return_tensors=self.framework )
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences ,return_tensors=self.framework ,padding=True )
        inputs['''text_inputs'''] = [text_inputs]
        return inputs
    def _forward( self ,model_inputs ):
        """simple docstring"""
        candidate_labels = model_inputs.pop('''candidate_labels''' )
        text_inputs = model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0] ,UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs ,**model_inputs )
        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self ,model_outputs ):
        """simple docstring"""
        candidate_labels = model_outputs.pop('''candidate_labels''' )
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores ,list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits ,axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores ,candidate_labels ) ,key=lambda x : -x[0] )
        ]
        return result
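# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file; assumes `transformers`
# with a CLIP checkpoint available, and is typically reached through the
# pipeline factory rather than by instantiating the class above directly):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     result = classifier("cat.png", candidate_labels=["cat", "dog"])
#     # -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}]
# ---------------------------------------------------------------------------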
| 16
| 0
|
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N ) -> int:
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'{solution() = }')
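# ---------------------------------------------------------------------------
# Worked mini-example (a sketch, independent of the 1000-digit constant
# above): with a window of length 2 over "2379", the adjacent-digit products
# are 2*3=6, 3*7=21 and 7*9=63, so the greatest product is 63. `solution()`
# runs the same sliding-window scan with a window of 13 digits.
# ---------------------------------------------------------------------------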
| 354
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMvaConfig( PretrainedConfig ):
    model_type = 'layoutlmv3'
    def __init__( self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_ad_position_embeddings=10_24 , coordinate_size=1_28 , shape_size=1_28 , has_relative_attention_bias=True , rel_pos_bins=32 , max_rel_pos=1_28 , rel_ad_pos_bins=64 , max_rel_ad_pos=2_56 , has_spatial_attention_bias=True , text_embed=True , visual_embed=True , input_size=2_24 , num_channels=3 , patch_size=16 , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_ad_position_embeddings = max_ad_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_ad_pos_bins = rel_ad_pos_bins
        self.max_rel_ad_pos = max_rel_ad_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMvaOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.12' )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
    def atol_for_validation( self ) -> float:
"""simple docstring"""
return 1e-5
@property
    def default_onnx_opset( self ) -> int:
"""simple docstring"""
return 12
    def generate_dummy_inputs( self , processor : "ProcessorMixin" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , num_channels : int = 3 , image_width : int = 40 , image_height : int = 40 , ) -> Mapping[str, Any]:
        """simple docstring"""
        setattr(processor.image_processor , """apply_ocr""" , False )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 1_28]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(
            processor(
                dummy_image , text=dummy_text , boxes=dummy_bboxes , return_tensors=framework , ) )
        return inputs
| 248
| 0
|
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def capitalize( sentence: str ) -> str:
    """simple docstring"""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
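# ---------------------------------------------------------------------------
# Worked examples (a sketch; `capitalize` is the function above):
#
#     capitalize("hello world")  # -> "Hello world"
#     capitalize("123 hello")    # -> "123 hello" (non-letters pass through)
#     capitalize("")             # -> ""
# ---------------------------------------------------------------------------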
| 224
|
'''simple docstring'''
import argparse
import struct
import unittest
class SHAaaa :
    def __init__(self , data ):
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6A09_E667,
0XBB67_AE85,
0X3C6E_F372,
0XA54F_F53A,
0X510E_527F,
0X9B05_688C,
0X1F83_D9AB,
0X5BE0_CD19,
]
# Initialize round constants
        self.round_constants = [
0X428A_2F98,
0X7137_4491,
0XB5C0_FBCF,
0XE9B5_DBA5,
0X3956_C25B,
0X59F1_11F1,
0X923F_82A4,
0XAB1C_5ED5,
0XD807_AA98,
0X1283_5B01,
0X2431_85BE,
0X550C_7DC3,
0X72BE_5D74,
0X80DE_B1FE,
0X9BDC_06A7,
0XC19B_F174,
0XE49B_69C1,
0XEFBE_4786,
0X0FC1_9DC6,
0X240C_A1CC,
0X2DE9_2C6F,
0X4A74_84AA,
0X5CB0_A9DC,
0X76F9_88DA,
0X983E_5152,
0XA831_C66D,
0XB003_27C8,
0XBF59_7FC7,
0XC6E0_0BF3,
0XD5A7_9147,
0X06CA_6351,
0X1429_2967,
0X27B7_0A85,
0X2E1B_2138,
0X4D2C_6DFC,
0X5338_0D13,
0X650A_7354,
0X766A_0ABB,
0X81C2_C92E,
0X9272_2C85,
0XA2BF_E8A1,
0XA81A_664B,
0XC24B_8B70,
0XC76C_51A3,
0XD192_E819,
0XD699_0624,
0XF40E_3585,
0X106A_A070,
0X19A4_C116,
0X1E37_6C08,
0X2748_774C,
0X34B0_BCB5,
0X391C_0CB3,
0X4ED8_AA4A,
0X5B9C_CA4F,
0X682E_6FF3,
0X748F_82EE,
0X78A5_636F,
0X84C8_7814,
0X8CC7_0208,
0X90BE_FFFA,
0XA450_6CEB,
0XBEF9_A3F7,
0XC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
        self.final_hash()
    @staticmethod
    def preprocessing(data ):
        padding = b"""\x80""" + (b"""\x00""" * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(""">Q""" , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash(self ):
# Convert into blocks of 64 bytes
        self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(""">16L""" , block ) )
# add 48 0-ed integers
words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0X1_0000_0000
                # Compression
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0XFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0X1_0000_0000
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0X1_0000_0000
                a, b, c, d, e, f, g, h = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0X1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0X1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
# Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0X1_0000_0000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = """""".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror(self , value , rotations ):
        return 0XFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaHashTest ( unittest.TestCase ):
    def test_match_hashes(self ):
        import hashlib

        msg = bytes("""Test String""" , """utf-8""" )
        self.assertEqual(SHAaaa(msg ).hash , hashlib.sha256(msg ).hexdigest() )
def main():
    '''simple docstring'''
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
    parser.add_argument(
        """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , """utf-8""" )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()

| 206
| 0
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester( unittest.TestCase ):
    def __init__( self ,parent ,batch_size=13 ,image_size=30 ,patch_size=2 ,num_channels=3 ,is_training=True ,use_labels=True ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,type_sequence_label_size=10 ,initializer_range=0.0_2 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = ViTConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=False ,initializer_range=self.initializer_range ,)
        return config, pixel_values
    def create_and_check_model( self ,config ,pixel_values ):
        model = FlaxViTModel(config=config )
        result = model(pixel_values )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) )
    def create_and_check_for_image_classification( self ,config ,pixel_values ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp( self ) -> None:
        self.model_tester = FlaxViTModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=ViTConfig ,has_text_modality=False ,hidden_size=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
    def test_jit_compilation( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict ,model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values ,**kwargs ):
                    return model(pixel_values=pixel_values ,**kwargs )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) ,len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs ,outputs ):
                    self.assertEqual(jitted_output.shape ,output.shape )
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224" )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
| 142
|
'''simple docstring'''
import pprint
import requests
API_ENDPOINT_URL = '''https://zenquotes.io/api'''


def quote_of_the_day() -> list:
    """simple docstring"""
    return requests.get(API_ENDPOINT_URL + "/today" ).json()


def random_quotes() -> list:
    """simple docstring"""
    return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 142
| 1
|
'''simple docstring'''
def price_plus_tax( price: float , tax_rate: float ) -> float:
    """simple docstring"""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(1_00, 0.25) = }""")
print(F"""{price_plus_tax(125.50, 0.05) = }""")
| 70
|
def is_contains_unique_chars( input_str : str ) -> bool:
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
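# ---------------------------------------------------------------------------
# Worked examples (a sketch; `is_contains_unique_chars` is the function
# above): each character turns on one bit of an arbitrarily large int, so a
# repeat is detected when its bit is already on.
#
#     is_contains_unique_chars("abc")  # -> True
#     is_contains_unique_chars("aab")  # -> False ('a' sets bit 97 twice)
# ---------------------------------------------------------------------------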
| 175
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["ViTFeatureExtractor"]
lowercase_ = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20
| 1
|
from __future__ import annotations
def all_construct(target: str , word_bank: list[str] | None = None ) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target ) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word )] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 336
|
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key ):
    if "model" in orig_key:
        orig_key = orig_key.replace('''model.''' , '''''' )
    if "norm1" in orig_key:
        orig_key = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
    if "norm2" in orig_key:
        orig_key = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
    if "norm" in orig_key:
        orig_key = orig_key.replace('''norm''' , '''LayerNorm''' )
    if "transformer" in orig_key:
        layer_num = orig_key.split('''.''' )[0].split('''_''' )[-1]
        orig_key = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('''mha.attn''' , '''attention.self''' )
    if "mha" in orig_key:
        orig_key = orig_key.replace('''mha''' , '''attention''' )
    if "W_q" in orig_key:
        orig_key = orig_key.replace('''W_q''' , '''self.query''' )
    if "W_k" in orig_key:
        orig_key = orig_key.replace('''W_k''' , '''self.key''' )
    if "W_v" in orig_key:
        orig_key = orig_key.replace('''W_v''' , '''self.value''' )
    if "ff1" in orig_key:
        orig_key = orig_key.replace('''ff1''' , '''intermediate.dense''' )
    if "ff2" in orig_key:
        orig_key = orig_key.replace('''ff2''' , '''output.dense''' )
    if "ff" in orig_key:
        orig_key = orig_key.replace('''ff''' , '''output.dense''' )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
    if "mlm" in orig_key:
        orig_key = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
    if "cls" not in orig_key:
        orig_key = '''yoso.''' + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings , orig_state_dict ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val

    orig_state_dict['''cls.predictions.bias'''] = orig_state_dict['''cls.predictions.decoder.bias''']
    orig_state_dict['''yoso.embeddings.position_ids'''] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path , yoso_config_file , pytorch_dump_path ):
    orig_state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model_state_dict''']
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
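# ---------------------------------------------------------------------------
# Hedged CLI sketch (the script name and paths are placeholders; the flags
# come from the argparse definition above):
#
#     python convert_yoso_checkpoint.py \
#         --pytorch_model_path /path/to/yoso.ckpt \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/output
# ---------------------------------------------------------------------------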
| 16
| 0
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = B'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals['''_TRAINERSPEC''']._serialized_start = 45
    _globals['''_TRAINERSPEC''']._serialized_end = 1581
    _globals['''_TRAINERSPEC_MODELTYPE''']._serialized_start = 1517
    _globals['''_TRAINERSPEC_MODELTYPE''']._serialized_end = 1570
    _globals['''_NORMALIZERSPEC''']._serialized_start = 1584
    _globals['''_NORMALIZERSPEC''']._serialized_end = 1793
    _globals['''_SELFTESTDATA''']._serialized_start = 1795
    _globals['''_SELFTESTDATA''']._serialized_end = 1916
    _globals['''_SELFTESTDATA_SAMPLE''']._serialized_start = 1864
    _globals['''_SELFTESTDATA_SAMPLE''']._serialized_end = 1905
    _globals['''_MODELPROTO''']._serialized_start = 1919
    _globals['''_MODELPROTO''']._serialized_end = 2429
    _globals['''_MODELPROTO_SENTENCEPIECE''']._serialized_start = 2208
    _globals['''_MODELPROTO_SENTENCEPIECE''']._serialized_end = 2418
    _globals['''_MODELPROTO_SENTENCEPIECE_TYPE''']._serialized_start = 2323
    _globals['''_MODELPROTO_SENTENCEPIECE_TYPE''']._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 358
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class a ( UpperCAmelCase ):
_lowercase = ["image_processor", "tokenizer"]
_lowercase = "OwlViTImageProcessor"
_lowercase = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , query_images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        '''simple docstring'''
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )

        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t ))

                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process(*args , **kwargs )
    def post_process_object_detection( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process_object_detection(*args , **kwargs )
    def post_process_image_guided_detection( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
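# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file; assumes `transformers`
# and PIL are installed, and mirrors the branches of __call__ above):
#
#     from PIL import Image
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     image = Image.open("cats.png")
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#     # inputs now holds input_ids, attention_mask and pixel_values
# ---------------------------------------------------------------------------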
| 189
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_00_04
RO_CODE = 25_00_20
@require_sentencepiece
@require_tokenizers
class lowercase( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="""en_XX""", tgt_lang="""ro_RO""", keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = """<s>"""
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ), token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ), token )
    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0], """<s>""" )
        self.assertEqual(vocab_keys[1], """<pad>""" )
        self.assertEqual(vocab_keys[-1], """<mask>""" )
        self.assertEqual(len(vocab_keys ), 1_054 )
    def test_vocab_size( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_054 )
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="""en_XX""", tgt_lang="""ro_RO""", keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""], )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""], )
@slow
    def test_tokenizer_integration( self ):
'''simple docstring'''
_snake_case : Union[str, Any] = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_snake_case, model_name="""facebook/mbart-large-50""", revision="""d3913889c59cd5c9e456b269c376325eabad57e2""", )
    def test_save_pretrained( self ):
        '''simple docstring'''
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs )

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files )

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2 )

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files )

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key ) )

                shutil.rmtree(tmpdirname2 )

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key ) )

                shutil.rmtree(tmpdirname2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase( unittest.TestCase ):
'''simple docstring'''
lowercase__ = "facebook/mbart-large-50-one-to-many-mmt"
lowercase__ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
lowercase__ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
lowercase__ = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def UpperCamelCase_ ( cls: Optional[Any] ):
'''simple docstring'''
_snake_case : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name, src_lang="""en_XX""", tgt_lang="""ro_RO""" )
_snake_case : Union[str, Any] = 1
return cls
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""], 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""], 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""], 250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""], 250_038 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
self.assertIn(a_, self.tokenizer.all_special_ids )
_snake_case : Dict = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
_snake_case : Dict = self.tokenizer.decode(a_, skip_special_tokens=a_ )
_snake_case : Tuple = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=a_ )
self.assertEqual(a_, a_ )
self.assertNotIn(self.tokenizer.eos_token, a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0], a_ )
_snake_case : Any = 10
_snake_case : List[Any] = self.tokenizer(a_, max_length=a_, truncation=a_ ).input_ids[0]
self.assertEqual(ids[0], a_ )
self.assertEqual(ids[-1], 2 )
self.assertEqual(len(a_ ), a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ), [250_053, 250_001] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = tempfile.mkdtemp()
_snake_case : Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a_ )
_snake_case : Any = MBartaaTokenizer.from_pretrained(a_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, a_ )
@require_torch
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=a_, return_tensors="""pt""" )
_snake_case : Any = shift_tokens_right(batch["""labels"""], self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Any = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=a_, truncation=a_, max_length=len(self.expected_src_tokens ), return_tensors="""pt""", )
_snake_case : List[Any] = shift_tokens_right(batch["""labels"""], self.tokenizer.pad_token_id )
self.assertIsInstance(a_, a_ )
self.assertEqual((2, 14), batch.input_ids.shape )
self.assertEqual((2, 14), batch.attention_mask.shape )
_snake_case : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, a_ )
self.assertEqual(2, batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.tokenizer(self.src_text, padding=a_, truncation=a_, max_length=3, return_tensors="""pt""" )
_snake_case : int = self.tokenizer(
text_target=self.tgt_text, padding=a_, truncation=a_, max_length=10, return_tensors="""pt""" )
_snake_case : Union[str, Any] = targets["""input_ids"""]
_snake_case : Optional[int] = shift_tokens_right(a_, self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
@require_torch
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Tuple = self.tokenizer._build_translation_inputs(
"""A test""", return_tensors="""pt""", src_lang="""en_XX""", tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(a_ ), {
# en_XX, A, test, EOS
"""input_ids""": [[250_004, 62, 3_034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250_001,
}, )
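# Illustrative usage sketch (an addition, not part of the test suite): the
# class names below are the public `transformers` API rather than the
# obfuscated aliases used in this file, and calling the function requires
# network access to download the checkpoint exercised above.
def _translation_usage_sketch():
    from transformers import MBart50TokenizerFast, MBartForConditionalGeneration

    tokenizer = MBart50TokenizerFast.from_pretrained(
        "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
    model = MBartForConditionalGeneration.from_pretrained(
        "facebook/mbart-large-50-one-to-many-mmt")
    batch = tokenizer("UN Chief Says There Is No Military Solution in Syria",
                      return_tensors="pt")
    # Force the Romanian language code as the first generated token.
    generated = model.generate(
        **batch, forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"])
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))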
| 64
|
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    '''simple docstring'''
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main():
    '''simple docstring'''
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("""Sorted order is:""" , """ """.join(str(n) for n in a))
if __name__ == "__main__":
    main()
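# Worked example added for clarity (a sketch, not part of the original file):
# pigeonhole sort runs in O(n + k) time with O(k) extra space, where
# k = max(a) - min(a) + 1, so it only pays off when the value range is small
# relative to len(a).
#   >>> data = [8, 3, 2, 7, 4, 6, 8]   # k = 8 - 2 + 1 = 7 pigeonholes
#   >>> pigeonhole_sort(data)
#   >>> data
#   [2, 3, 4, 6, 7, 8, 8]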
| 248
| 0
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_ ):
    """simple docstring"""
    return EnvironmentCommand()
def download_command_factory(args ):
    """simple docstring"""
    return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser: ArgumentParser ) -> None:
        """simple docstring"""
        download_parser = parser.add_parser('''env''' )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            '''--accelerate-config_file''' , default=None , help='''The accelerate config file to use for the default values in the launching script.''' , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ) -> None:
        """simple docstring"""
        self._accelerate_config_file = accelerate_config_file
    def run( self ) -> dict:
        """simple docstring"""
        safetensors_version = '''not installed'''
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec('''safetensors''' ) is not None:
            import safetensors
            safetensors_version = f'{safetensors.__version__} but is ignored because of PyTorch version too old.'
        accelerate_version = '''not installed'''
        accelerate_config = accelerate_config_str = '''not found'''
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                '''\n'''.join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else f'\t{accelerate_config}'
            )
        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = '''not installed'''
        tf_cuda_available = '''NA'''
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices('''GPU''' ) )
        flax_version = '''not installed'''
        jax_version = '''not installed'''
        jaxlib_version = '''not installed'''
        jax_backend = '''NA'''
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            '''`transformers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''Huggingface_hub version''': huggingface_hub.__version__,
            '''Safetensors version''': f'{safetensors_version}',
            '''Accelerate version''': f'{accelerate_version}',
            '''Accelerate config''': f'{accelerate_config_str}',
            '''PyTorch version (GPU?)''': f'{pt_version} ({pt_cuda_available})',
            '''Tensorflow version (GPU?)''': f'{tf_version} ({tf_cuda_available})',
            '''Flax version (CPU?/GPU?/TPU?)''': f'{flax_version} ({jax_backend})',
            '''Jax version''': f'{jax_version}',
            '''JaxLib version''': f'{jaxlib_version}',
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }
        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(info ) )
        return info
@staticmethod
    def format_dict( d: dict ) -> str:
        """simple docstring"""
        return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 359
|
from abc import ABC, abstractmethod
from typing import List, Optional
class _a ( UpperCamelCase__ ):
def __init__( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
self.test()
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = 0
lowercase__ = False
while not completed:
if counter == 1:
self.reset()
lowercase__ = self.advance()
if not self.does_advance(UpperCamelCase_ ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
lowercase__ , lowercase__ , lowercase__ = self.update(UpperCamelCase_ )
counter += 1
if counter > 10_000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: int ) -> str:
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase_ ( self: Any ) -> Tuple:
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int=False ) -> Any:
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class _a ( UpperCamelCase__ ):
def __init__( self: str , UpperCamelCase_: List[int] ) -> Tuple:
"""simple docstring"""
super(UpperCamelCase_ , self ).__init__()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or len(UpperCamelCase_ ) == 0:
raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' )
if any((not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )
lowercase__ = token_ids
lowercase__ = len(self.token_ids )
lowercase__ = -1 # the index of the currently fulfilled step
lowercase__ = False
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: int ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase_ )}' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: int ) -> Dict:
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase_ )}' )
lowercase__ = False
lowercase__ = False
lowercase__ = False
if self.does_advance(UpperCamelCase_ ):
self.fulfilled_idx += 1
lowercase__ = True
if self.fulfilled_idx == (self.seqlen - 1):
lowercase__ = True
lowercase__ = completed
else:
# failed to make progress.
lowercase__ = True
self.reset()
return stepped, completed, reset
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = False
lowercase__ = 0
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Union[str, Any]=False ) -> Tuple:
"""simple docstring"""
lowercase__ = PhrasalConstraint(self.token_ids )
if stateful:
lowercase__ = self.seqlen
lowercase__ = self.fulfilled_idx
lowercase__ = self.completed
return new_constraint
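# Illustrative walk-through (a sketch added for clarity, assuming the intended
# un-obfuscated behavior of the class above): `update` returns the triple
# (stepped, completed, reset).
#   c = PhrasalConstraint([5, 9, 3])
#   c.advance()   # -> 5, the next required token
#   c.update(5)   # -> (True, False, False)
#   c.update(9)   # -> (True, False, False)
#   c.update(3)   # -> (True, True, False); c.remaining() == 0
#   # Feeding a non-matching token instead returns (False, False, True)
#   # and resets the partial progress.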
class _a :
def __init__( self: Union[str, Any] , UpperCamelCase_: List[List[int]] , UpperCamelCase_: int=True ) -> int:
"""simple docstring"""
lowercase__ = max([len(UpperCamelCase_ ) for one in nested_token_ids] )
lowercase__ = {}
for token_ids in nested_token_ids:
lowercase__ = root
for tidx, token_id in enumerate(UpperCamelCase_ ):
if token_id not in level:
lowercase__ = {}
lowercase__ = level[token_id]
if no_subsets and self.has_subsets(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
f' {nested_token_ids}.' )
lowercase__ = root
def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> Any:
"""simple docstring"""
lowercase__ = self.trie
for current_token in current_seq:
lowercase__ = start[current_token]
lowercase__ = list(start.keys() )
return next_tokens
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.next_tokens(UpperCamelCase_ )
return len(UpperCamelCase_ ) == 0
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ = list(root.values() )
if len(UpperCamelCase_ ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCamelCase_ ) for nn in next_nodes] )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int ) -> Tuple:
"""simple docstring"""
lowercase__ = self.count_leaves(UpperCamelCase_ )
return len(UpperCamelCase_ ) != leaf_count
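# Illustrative example (a sketch added for clarity): for
# nested_token_ids = [[1, 2, 3], [1, 4]] the trie built above is
# {1: {2: {3: {}}, 4: {}}}, so next_tokens([1]) == [2, 4],
# reached_leaf([1, 4]) is True once a full branch has been consumed, and
# count_leaves at the root returns 2, one leaf per candidate sequence.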
class _a ( UpperCamelCase__ ):
def __init__( self: Optional[int] , UpperCamelCase_: List[List[int]] ) -> List[Any]:
"""simple docstring"""
super(UpperCamelCase_ , self ).__init__()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or len(UpperCamelCase_ ) == 0:
raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
if any(not isinstance(UpperCamelCase_ , UpperCamelCase_ ) for token_ids in nested_token_ids ):
raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
if any(
any((not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )
lowercase__ = DisjunctiveTrie(UpperCamelCase_ )
lowercase__ = nested_token_ids
lowercase__ = self.trie.max_height
lowercase__ = []
lowercase__ = False
def lowerCamelCase_ ( self: Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ = self.trie.next_tokens(self.current_seq )
if len(UpperCamelCase_ ) == 0:
return None
else:
return token_list
def lowerCamelCase_ ( self: str , UpperCamelCase_: int ) -> Tuple:
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase_ )}' )
lowercase__ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowerCamelCase_ ( self: int , UpperCamelCase_: int ) -> Dict:
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase_ )}' )
lowercase__ = False
lowercase__ = False
lowercase__ = False
if self.does_advance(UpperCamelCase_ ):
self.current_seq.append(UpperCamelCase_ )
lowercase__ = True
else:
lowercase__ = True
self.reset()
lowercase__ = self.trie.reached_leaf(self.current_seq )
lowercase__ = completed
return stepped, completed, reset
def lowerCamelCase_ ( self: List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = False
lowercase__ = []
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Optional[Any]=False ) -> str:
"""simple docstring"""
lowercase__ = DisjunctiveConstraint(self.token_ids )
if stateful:
lowercase__ = self.seqlen
lowercase__ = self.current_seq
lowercase__ = self.completed
return new_constraint
class _a :
def __init__( self: int , UpperCamelCase_: List[Constraint] ) -> Dict:
"""simple docstring"""
lowercase__ = constraints
# max # of steps required to fulfill a given constraint
lowercase__ = max([c.seqlen for c in constraints] )
lowercase__ = len(UpperCamelCase_ )
lowercase__ = False
self.init_state()
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = []
lowercase__ = None
lowercase__ = [constraint.copy(stateful=UpperCamelCase_ ) for constraint in self.constraints]
def lowerCamelCase_ ( self: str ) -> Any:
"""simple docstring"""
lowercase__ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowercase__ = constraint.advance()
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.append(UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.extend(UpperCamelCase_ )
else:
lowercase__ = self.inprogress_constraint.advance()
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.append(UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.extend(UpperCamelCase_ )
if len(UpperCamelCase_ ) == 0:
return None
else:
return token_list
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Optional[List[int]] ) -> Optional[int]:
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowercase__ , lowercase__ = self.add(UpperCamelCase_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> int:
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' )
lowercase__ , lowercase__ = False, False
if self.completed:
lowercase__ = True
lowercase__ = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
lowercase__ , lowercase__ , lowercase__ = self.inprogress_constraint.update(UpperCamelCase_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we call self.init_state(), since we only reset the state for this
                # particular constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase_ ) )
lowercase__ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowercase__ = None
if len(self.pending_constraints ) == 0:
# we're done!
lowercase__ = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of the
            # constraints in our list?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCamelCase_ ):
lowercase__ , lowercase__ , lowercase__ = pending_constraint.update(UpperCamelCase_ )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(UpperCamelCase_ )
lowercase__ = None
if not complete and stepped:
lowercase__ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowercase__ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowercase__ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Optional[Any]=True ) -> Dict:
"""simple docstring"""
        lowercase__ = ConstraintListState(self.constraints )  # we never mutate the self.constraints objects
        # throughout this process, so they are still in their initialization state.
if stateful:
lowercase__ = [
constraint.copy(stateful=UpperCamelCase_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowercase__ = self.inprogress_constraint.copy(stateful=UpperCamelCase_ )
lowercase__ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
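# End-to-end sketch (an illustrative addition; assumes a seq2seq checkpoint
# such as "t5-small" and the public `transformers` API, where constraints like
# the ones above plug into beam search via the `constraints` argument of
# `generate`).
def _constrained_generation_sketch():
    from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, PhrasalConstraint

    tokenizer = AutoTokenizer.from_pretrained("t5-small")
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
    force = PhrasalConstraint(tokenizer("Maus", add_special_tokens=False).input_ids)
    inputs = tokenizer("translate English to German: the mouse", return_tensors="pt")
    # Constrained decoding requires beam search (num_beams > 1).
    out = model.generate(**inputs, constraints=[force], num_beams=4)
    print(tokenizer.batch_decode(out, skip_special_tokens=True))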
| 93
| 0
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : str , A : Tuple ) ->Tuple:
super().__init__()
lowerCamelCase__ : Optional[Any] = torchvision.models.resnetaaa(pretrained=A )
lowerCamelCase__ : List[Any] = list(model.children() )[:-2]
lowerCamelCase__ : Tuple = nn.Sequential(*A )
lowerCamelCase__ : List[Any] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def __lowerCamelCase ( self : Any , A : Any ) ->str:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
lowerCamelCase__ : Optional[Any] = self.pool(self.model(A ) )
lowerCamelCase__ : str = torch.flatten(A , start_dim=2 )
lowerCamelCase__ : Optional[int] = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
def __init__( self : List[Any] , A : Tuple , A : int , A : Optional[int] , A : List[str] , A : Dict ) ->Optional[Any]:
lowerCamelCase__ : Union[str, Any] = [json.loads(A ) for l in open(A )]
lowerCamelCase__ : List[Any] = os.path.dirname(A )
lowerCamelCase__ : Optional[Any] = tokenizer
lowerCamelCase__ : Union[str, Any] = labels
lowerCamelCase__ : Tuple = len(A )
lowerCamelCase__ : Optional[int] = max_seq_length
lowerCamelCase__ : List[str] = transforms
def __len__( self : Tuple ) ->int:
return len(self.data )
def __getitem__( self : Tuple , A : int ) ->List[Any]:
lowerCamelCase__ : str = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=A ) )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = sentence[0], sentence[1:-1], sentence[-1]
lowerCamelCase__ : Any = sentence[: self.max_seq_length]
lowerCamelCase__ : int = torch.zeros(self.n_classes )
lowerCamelCase__ : str = 1
lowerCamelCase__ : Any = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' )
lowerCamelCase__ : Tuple = self.transforms(A )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def __lowerCamelCase ( self : Tuple ) ->Tuple:
lowerCamelCase__ : Optional[Any] = Counter()
for row in self.data:
label_freqs.update(row['''label'''] )
return label_freqs
def _a ( UpperCAmelCase ) -> Dict:
"""simple docstring"""
lowerCamelCase__ : Dict = [len(row['''sentence'''] ) for row in batch]
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = len(UpperCAmelCase ), max(UpperCAmelCase )
lowerCamelCase__ : List[str] = torch.zeros(UpperCAmelCase , UpperCAmelCase , dtype=torch.long )
lowerCamelCase__ : Optional[int] = torch.zeros(UpperCAmelCase , UpperCAmelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(UpperCAmelCase , UpperCAmelCase ) ):
lowerCamelCase__ : List[Any] = input_row['''sentence''']
lowerCamelCase__ : Union[str, Any] = 1
lowerCamelCase__ : Union[str, Any] = torch.stack([row['''image'''] for row in batch] )
lowerCamelCase__ : Optional[Any] = torch.stack([row['''label'''] for row in batch] )
lowerCamelCase__ : Optional[Any] = torch.stack([row['''image_start_token'''] for row in batch] )
lowerCamelCase__ : Optional[Any] = torch.stack([row['''image_end_token'''] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def _a ( ) -> str:
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def _a ( ) -> Optional[Any]:
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
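# Wiring sketch (an illustrative addition; `JsonlDataset`, `collate_fn`, and
# `get_image_transforms` are the assumed un-obfuscated names of the dataset
# class and the two helpers defined above):
#   from torch.utils.data import DataLoader
#   transforms = get_image_transforms()
#   dataset = JsonlDataset(data_path, tokenizer, transforms, labels, max_seq_length)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
# Each batch is the tuple assembled in the collate function above:
#   (text, mask, image, image_start_token, image_end_token, labels).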
| 142
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
"""simple docstring"""
return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="attention" ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : str = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
lowerCamelCase__ : Union[str, Any] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowerCamelCase__ : Dict = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
lowerCamelCase__ : Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowerCamelCase__ : int = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
lowerCamelCase__ : str = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowerCamelCase__ : Any = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
lowerCamelCase__ : Optional[int] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Union[str, Any]:
"""simple docstring"""
if split_mlp_wi:
lowerCamelCase__ : Dict = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
lowerCamelCase__ : List[Any] = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
lowerCamelCase__ : str = (wi_a, wi_a)
else:
lowerCamelCase__ : Optional[int] = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
lowerCamelCase__ : Tuple = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
return wi, wo
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def _a ( UpperCAmelCase , *, UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__ : List[str] = traverse_util.flatten_dict(variables['''target'''] )
lowerCamelCase__ : Union[str, Any] = {'''/'''.join(UpperCAmelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowerCamelCase__ : Any = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , UpperCAmelCase )
lowerCamelCase__ : List[str] = collections.OrderedDict()
# Shared embeddings.
lowerCamelCase__ : List[Any] = old['''token_embedder/embedding''']
# Encoder.
for i in range(UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
lowerCamelCase__ : int = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , '''encoder''' , '''pre_attention_layer_norm''' )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , '''encoder''' , '''attention''' )
lowerCamelCase__ : Optional[Any] = layer_norm
lowerCamelCase__ : Tuple = k.T
lowerCamelCase__ : Tuple = o.T
lowerCamelCase__ : List[Any] = q.T
lowerCamelCase__ : Optional[int] = v.T
# Block i, layer 1 (MLP).
lowerCamelCase__ : Optional[Any] = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , '''encoder''' , '''pre_mlp_layer_norm''' )
lowerCamelCase__ , lowerCamelCase__ : Any = tax_mlp_lookup(UpperCAmelCase , UpperCAmelCase , '''encoder''' , UpperCAmelCase )
lowerCamelCase__ : Any = layer_norm
if split_mlp_wi:
lowerCamelCase__ : Any = wi[0].T
lowerCamelCase__ : Any = wi[1].T
else:
lowerCamelCase__ : Tuple = wi.T
lowerCamelCase__ : List[str] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowerCamelCase__ : Tuple = tax_relpos_bias_lookup(
UpperCAmelCase , UpperCAmelCase , '''encoder''' ).T
lowerCamelCase__ : List[Any] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
lowerCamelCase__ : Optional[Any] = tax_relpos_bias_lookup(
UpperCAmelCase , 0 , '''encoder''' ).T
lowerCamelCase__ : Any = tax_relpos_bias_lookup(
UpperCAmelCase , 0 , '''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
lowerCamelCase__ : int = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , '''decoder''' , '''pre_self_attention_layer_norm''' )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , '''decoder''' , '''self_attention''' )
lowerCamelCase__ : Tuple = layer_norm
lowerCamelCase__ : Tuple = k.T
lowerCamelCase__ : List[Any] = o.T
lowerCamelCase__ : List[Any] = q.T
lowerCamelCase__ : Union[str, Any] = v.T
# Block i, layer 1 (Cross Attention).
lowerCamelCase__ : Dict = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , '''decoder''' , '''pre_cross_attention_layer_norm''' )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , '''decoder''' , '''encoder_decoder_attention''' )
lowerCamelCase__ : int = layer_norm
lowerCamelCase__ : int = k.T
lowerCamelCase__ : List[Any] = o.T
lowerCamelCase__ : Dict = q.T
lowerCamelCase__ : Union[str, Any] = v.T
# Block i, layer 2 (MLP).
lowerCamelCase__ : List[str] = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , '''decoder''' , '''pre_mlp_layer_norm''' )
lowerCamelCase__ , lowerCamelCase__ : int = tax_mlp_lookup(UpperCAmelCase , UpperCAmelCase , '''decoder''' , UpperCAmelCase )
lowerCamelCase__ : Optional[int] = layer_norm
if split_mlp_wi:
lowerCamelCase__ : List[str] = wi[0].T
lowerCamelCase__ : Optional[int] = wi[1].T
else:
lowerCamelCase__ : List[str] = wi.T
lowerCamelCase__ : Tuple = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowerCamelCase__ : Dict = tax_relpos_bias_lookup(UpperCAmelCase , UpperCAmelCase , '''decoder''' ).T
lowerCamelCase__ : str = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowerCamelCase__ : Dict = old['''decoder/logits_dense/kernel'''].T
return new
def _a ( UpperCAmelCase , UpperCAmelCase ) -> int:
"""simple docstring"""
lowerCamelCase__ : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowerCamelCase__ : str = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowerCamelCase__ : Optional[Any] = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
lowerCamelCase__ : Dict = state_dict['''shared.weight''']
return state_dict
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
"""simple docstring"""
lowerCamelCase__ : str = checkpoints.load_tax_checkpoint(UpperCAmelCase )
lowerCamelCase__ : str = convert_tax_to_pytorch(
UpperCAmelCase , num_layers=config.num_layers , is_encoder_only=UpperCAmelCase , scalable_attention=UpperCAmelCase )
lowerCamelCase__ : int = make_state_dict(UpperCAmelCase , UpperCAmelCase )
model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = False , ) -> str:
"""simple docstring"""
lowerCamelCase__ : List[Any] = MTaConfig.from_json_file(UpperCAmelCase )
print(f"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowerCamelCase__ : Optional[int] = UMTaEncoderModel(UpperCAmelCase )
else:
lowerCamelCase__ : List[str] = UMTaForConditionalGeneration(UpperCAmelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(UpperCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCAmelCase )
print('''Done''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
        '--is_encoder_only', action='store_true', help='Whether the model is an encoder-only model', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
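# Invocation sketch (an illustrative addition; the script name and paths are
# placeholders):
#   $ python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model \
#       --scalable_attention   # only for UMT5-style scaled-attention models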
| 142
| 1
|
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
def __get__( self ,a_ ,a_=None ) -> Optional[Any]:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute' )
_UpperCAmelCase : Dict = """__cached_""" + self.fget.__name__
_UpperCAmelCase : str = getattr(a_ ,a_ ,a_ )
if cached is None:
_UpperCAmelCase : Tuple = self.fget(a_ )
setattr(a_ ,a_ ,a_ )
return cached
def snake_case_ ( lowerCAmelCase_ )-> str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F'''invalid truth value {val!r}''' )
def snake_case_ ( lowerCAmelCase_ )-> Tuple:
'''simple docstring'''
if is_torch_fx_proxy(lowerCAmelCase_ ):
return True
if is_torch_available():
import torch
if isinstance(lowerCAmelCase_ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCAmelCase_ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCAmelCase_ , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCAmelCase_ , np.ndarray )
def snake_case_ ( lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
return isinstance(lowerCAmelCase_ , np.ndarray )
def snake_case_ ( lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
return _is_numpy(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
import torch
return isinstance(lowerCAmelCase_ , torch.Tensor )
def snake_case_ ( lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
return False if not is_torch_available() else _is_torch(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> int:
'''simple docstring'''
import torch
return isinstance(lowerCAmelCase_ , torch.device )
def snake_case_ ( lowerCAmelCase_ )-> Dict:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> Dict:
'''simple docstring'''
import torch
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
if hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Any = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
else:
return False
return isinstance(lowerCAmelCase_ , torch.dtype )
def snake_case_ ( lowerCAmelCase_ )-> str:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
import tensorflow as tf
return isinstance(lowerCAmelCase_ , tf.Tensor )
def snake_case_ ( lowerCAmelCase_ )-> str:
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCAmelCase_ , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(lowerCAmelCase_ )
return type(lowerCAmelCase_ ) == tf.Tensor
def snake_case_ ( lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCAmelCase_ , jnp.ndarray )
def snake_case_ ( lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
return False if not is_flax_available() else _is_jax(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , (dict, UserDict) ):
return {k: to_py_obj(lowerCAmelCase_ ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase_ , (list, tuple) ):
return [to_py_obj(lowerCAmelCase_ ) for o in obj]
elif is_tf_tensor(lowerCAmelCase_ ):
return obj.numpy().tolist()
elif is_torch_tensor(lowerCAmelCase_ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowerCAmelCase_ ):
return np.asarray(lowerCAmelCase_ ).tolist()
elif isinstance(lowerCAmelCase_ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def snake_case_ ( lowerCAmelCase_ )-> Optional[Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , (dict, UserDict) ):
return {k: to_numpy(lowerCAmelCase_ ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase_ , (list, tuple) ):
return np.array(lowerCAmelCase_ )
elif is_tf_tensor(lowerCAmelCase_ ):
return obj.numpy()
elif is_torch_tensor(lowerCAmelCase_ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCAmelCase_ ):
return np.asarray(lowerCAmelCase_ )
else:
return obj
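# Example (a sketch added for clarity; the two converters above correspond to
# what `transformers` calls `to_py_obj` and `to_numpy`): both walk dicts,
# lists, and tuples recursively, e.g.
#   to_py_obj({"ids": torch.tensor([[1, 2]])}) == {"ids": [[1, 2]]}
# while to_numpy stops at np.ndarray instead of plain Python lists.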
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Union[str, Any] = fields(self )
# Safety and consistency checks
if not len(a_ ):
raise ValueError(f'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' )
_UpperCAmelCase : Tuple = getattr(self ,class_fields[0].name )
_UpperCAmelCase : Tuple = all(getattr(self ,field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(a_ ):
if isinstance(a_ ,a_ ):
_UpperCAmelCase : Union[str, Any] = first_field.items()
_UpperCAmelCase : int = True
else:
try:
_UpperCAmelCase : Optional[int] = iter(a_ )
_UpperCAmelCase : Tuple = True
except TypeError:
_UpperCAmelCase : int = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(a_ ):
if (
not isinstance(a_ ,(list, tuple) )
or not len(a_ ) == 2
or not isinstance(element[0] ,a_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
_UpperCAmelCase : int = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self ,element[0] ,element[1] )
if element[1] is not None:
_UpperCAmelCase : str = element[1]
elif first_field is not None:
_UpperCAmelCase : List[str] = first_field
else:
for field in class_fields:
_UpperCAmelCase : Optional[Any] = getattr(self ,field.name )
if v is not None:
_UpperCAmelCase : Any = v
def __delitem__( self ,*a_ ,**a_ ) -> int:
raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def _snake_case ( self ,*a_ ,**a_ ) -> Optional[Any]:
raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def _snake_case ( self ,*a_ ,**a_ ) -> Union[str, Any]:
raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def _snake_case ( self ,*a_ ,**a_ ) -> str:
raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self ,a_ ) -> int:
if isinstance(a_ ,a_ ):
_UpperCAmelCase : Any = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self ,a_ ,a_ ) -> Union[str, Any]:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(a_ ,a_ )
super().__setattr__(a_ ,a_ )
def __setitem__( self ,a_ ,a_ ) -> str:
# Will raise a KeyException if needed
super().__setitem__(a_ ,a_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(a_ ,a_ )
def _snake_case ( self ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
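# Behavioral sketch (an illustrative addition; the class above corresponds to
# what `transformers` calls `ModelOutput`): subclasses behave as both
# dataclasses and ordered mappings, skipping fields whose value is None.
#   @dataclass
#   class DemoOutput(ModelOutput):
#       loss: Optional[Any] = None
#       logits: Optional[Any] = None
#   out = DemoOutput(logits=[1, 2])
#   out.logits == out["logits"] == out[0]   # attribute, key, and index access
#   out.to_tuple() == ([1, 2],)             # `loss` is None, so it is skipped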
class lowercase ( _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
@classmethod
def _snake_case ( cls ,a_ ) -> Union[str, Any]:
raise ValueError(
f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """longest"""
UpperCAmelCase = """max_length"""
UpperCAmelCase = """do_not_pad"""
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """pt"""
UpperCAmelCase = """tf"""
UpperCAmelCase = """np"""
UpperCAmelCase = """jax"""
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ) -> Union[str, Any]:
_UpperCAmelCase : List[str] = context_managers
_UpperCAmelCase : Union[str, Any] = ExitStack()
def __enter__( self ) -> List[Any]:
for context_manager in self.context_managers:
self.stack.enter_context(a_ )
def __exit__( self ,*a_ ,**a_ ) -> List[str]:
self.stack.__exit__(*a_ ,**a_ )
def snake_case_ ( lowerCAmelCase_ )-> Any:
'''simple docstring'''
_UpperCAmelCase : Dict = infer_framework(lowerCAmelCase_ )
if framework == "tf":
_UpperCAmelCase : int = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
_UpperCAmelCase : Any = inspect.signature(model_class.forward ) # PyTorch models
else:
_UpperCAmelCase : Optional[int] = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def snake_case_ ( lowerCAmelCase_ )-> str:
'''simple docstring'''
_UpperCAmelCase : List[Any] = model_class.__name__
_UpperCAmelCase : Dict = infer_framework(lowerCAmelCase_ )
if framework == "tf":
_UpperCAmelCase : Any = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
_UpperCAmelCase : Dict = inspect.signature(model_class.forward ) # PyTorch models
else:
_UpperCAmelCase : int = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ = "" , lowerCAmelCase_ = "." )-> Tuple:
'''simple docstring'''
def _flatten_dict(lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_="." ):
for k, v in d.items():
_UpperCAmelCase : List[Any] = str(lowerCAmelCase_ ) + delimiter + str(lowerCAmelCase_ ) if parent_key else k
if v and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
yield from flatten_dict(lowerCAmelCase_ , lowerCAmelCase_ , delimiter=lowerCAmelCase_ ).items()
else:
yield key, v
return dict(_flatten_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) )
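# Example (a quick sketch): nested keys are joined with the delimiter, so
#   flatten_dict({"a": {"b": 1}, "c": 2}) == {"a.b": 1, "c": 2}
#   flatten_dict({"a": {"b": 1}}, delimiter="/") == {"a/b": 1}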
@contextmanager
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ = False )-> Tuple:
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_=None )-> Union[str, Any]:
'''simple docstring'''
if is_numpy_array(lowerCAmelCase_ ):
return np.transpose(lowerCAmelCase_ , axes=lowerCAmelCase_ )
elif is_torch_tensor(lowerCAmelCase_ ):
return array.T if axes is None else array.permute(*lowerCAmelCase_ )
elif is_tf_tensor(lowerCAmelCase_ ):
import tensorflow as tf
return tf.transpose(lowerCAmelCase_ , perm=lowerCAmelCase_ )
elif is_jax_tensor(lowerCAmelCase_ ):
return jnp.transpose(lowerCAmelCase_ , axes=lowerCAmelCase_ )
else:
raise ValueError(F'''Type not supported for transpose: {type(lowerCAmelCase_ )}.''' )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> str:
'''simple docstring'''
if is_numpy_array(lowerCAmelCase_ ):
return np.reshape(lowerCAmelCase_ , lowerCAmelCase_ )
elif is_torch_tensor(lowerCAmelCase_ ):
return array.reshape(*lowerCAmelCase_ )
elif is_tf_tensor(lowerCAmelCase_ ):
import tensorflow as tf
return tf.reshape(lowerCAmelCase_ , lowerCAmelCase_ )
elif is_jax_tensor(lowerCAmelCase_ ):
return jnp.reshape(lowerCAmelCase_ , lowerCAmelCase_ )
else:
raise ValueError(F'''Type not supported for reshape: {type(lowerCAmelCase_ )}.''' )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_=None )-> Dict:
'''simple docstring'''
if is_numpy_array(lowerCAmelCase_ ):
return np.squeeze(lowerCAmelCase_ , axis=lowerCAmelCase_ )
elif is_torch_tensor(lowerCAmelCase_ ):
return array.squeeze() if axis is None else array.squeeze(dim=lowerCAmelCase_ )
elif is_tf_tensor(lowerCAmelCase_ ):
import tensorflow as tf
return tf.squeeze(lowerCAmelCase_ , axis=lowerCAmelCase_ )
elif is_jax_tensor(lowerCAmelCase_ ):
return jnp.squeeze(lowerCAmelCase_ , axis=lowerCAmelCase_ )
else:
raise ValueError(F'''Type not supported for squeeze: {type(lowerCAmelCase_ )}.''' )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
'''simple docstring'''
if is_numpy_array(lowerCAmelCase_ ):
return np.expand_dims(lowerCAmelCase_ , lowerCAmelCase_ )
elif is_torch_tensor(lowerCAmelCase_ ):
return array.unsqueeze(dim=lowerCAmelCase_ )
elif is_tf_tensor(lowerCAmelCase_ ):
import tensorflow as tf
return tf.expand_dims(lowerCAmelCase_ , axis=lowerCAmelCase_ )
elif is_jax_tensor(lowerCAmelCase_ ):
return jnp.expand_dims(lowerCAmelCase_ , axis=lowerCAmelCase_ )
else:
raise ValueError(F'''Type not supported for expand_dims: {type(lowerCAmelCase_ )}.''' )
def snake_case_ ( lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
if is_numpy_array(lowerCAmelCase_ ):
return np.size(lowerCAmelCase_ )
elif is_torch_tensor(lowerCAmelCase_ ):
return array.numel()
elif is_tf_tensor(lowerCAmelCase_ ):
import tensorflow as tf
return tf.size(lowerCAmelCase_ )
elif is_jax_tensor(lowerCAmelCase_ ):
return array.size
else:
        raise ValueError(F'''Type not supported for tensor_size: {type(lowerCAmelCase_ )}.''' )
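# Example (a sketch added for clarity; the dispatcher names are the assumed
# un-obfuscated ones): these helpers keep shape manipulation framework-agnostic.
# For x = np.ones((2, 3)):
#   transpose(x).shape == (3, 2)
#   reshape(x, (3, 2)).shape == (3, 2)
#   expand_dims(x, 0).shape == (1, 2, 3)
#   squeeze(expand_dims(x, 0)).shape == (2, 3)
# The same calls accept torch, TensorFlow, and JAX tensors unchanged.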
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[Any]:
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(lowerCAmelCase_ , (tuple, list) ):
_UpperCAmelCase : Optional[Any] = [F'''{repo_id}--{v}''' if (v is not None and """--""" not in v) else v for v in value]
elif value is not None and "--" not in value:
_UpperCAmelCase : List[Any] = F'''{repo_id}--{value}'''
return auto_map
def snake_case_ ( lowerCAmelCase_ )-> Dict:
'''simple docstring'''
for base_class in inspect.getmro(lowerCAmelCase_ ):
_UpperCAmelCase : Union[str, Any] = base_class.__module__
_UpperCAmelCase : List[str] = base_class.__name__
if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F'''Could not infer framework from class {model_class}.''' )
| 366
|
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """Builds the train/validation/test dataloaders for one cross-validation fold."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    # Use accelerator.print to print only on the main process.
    accelerator.print("Average test metrics from all folds:", test_metric)
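# Illustrative sketch, not part of the original script: the test-set ensembling
# above is a soft vote. Per-fold logits are stacked, averaged, and only then
# reduced with argmax. The tensors below are hypothetical toy logits.
def _soft_vote_demo():
    fold_logits = [torch.tensor([[2.0, 1.0]]), torch.tensor([[0.0, 3.0]])]
    averaged = torch.stack(fold_logits, dim=0).sum(dim=0).div(len(fold_logits))
    print(averaged.argmax(dim=-1))  # tensor([1])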
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
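# Example invocation (flag values and script name are illustrative):
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16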
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
    """Converts the original Bort checkpoint (GluonNLP/MXNet) into a 🤗 Transformers PyTorch checkpoint."""
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    data_dir = os.path.join(get_home_dir(), "models")
    vocab = _load_vocab(vocab_name, None, data_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNet arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
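# Example invocation (paths are illustrative):
#   python convert_bort_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path /path/to/hf_bort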
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
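# Sketch of the intended effect (illustrative): with the lazy module installed in
# sys.modules, `from transformers.models.funnel import FunnelConfig` only triggers
# the heavy submodule imports on first attribute access.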
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class LinearAlgebraTestCase(unittest.TestCase):
    def test_component(self) -> None:
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """test for method __add__()"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """test for method __sub__()"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """test for method __mul__()"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component_vector(self) -> None:
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self) -> None:
        """test for Matrix method __mul__()"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self) -> None:
        """test for Matrix method __add__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self) -> None:
        """test for Matrix method __sub__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
unittest.main()
ROMAN = [
(1000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral to an integer.

    >>> roman_to_int("XIV")
    14
    >>> roman_to_int("MCMXCIV")
    1994
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    """
    Convert an integer to a Roman numeral.

    >>> int_to_roman(14)
    'XIV'
    >>> int_to_roman(1994)
    'MCMXCIV'
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
del dset
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Collects the shapes of every tensor in a (possibly nested) input tree."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Converts a flat index into a multi-dimensional index over `dims`."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges/end_edges flag, per dimension, whether the start/end index sits
    # on the tensor boundary; a flag only counts if all outer dimensions also do.
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Slices `t` between two flat indices over its first `no_batch_dims` dimensions without copying it first."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Applies `layer` to `inputs` in chunks of `chunk_size` over the flattened batch dimensions."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(self, max_chunk_size=512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2):
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size):
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
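# Illustrative sketch, not part of the module: applying chunk_layer to a toy
# "layer". The lambda below is a hypothetical stand-in for a real module call.
def _chunk_layer_demo():
    x = torch.randn(4, 8, 16)  # two batch dims (4, 8), feature dim 16
    full = x * 2.0
    chunked = chunk_layer(lambda t: t * 2.0, {"t": x}, chunk_size=5, no_batch_dims=2)
    assert torch.allclose(full, chunked)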
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
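# Illustrative usage (not part of the module):
#   config = MaskedBertConfig(pruning_method="topK", mask_scale=0.0)
#   assert config.model_type == "masked_bert"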
def check_cycle(graph: dict) -> bool:
    """
    Returns True if the given directed graph contains a cycle.

    >>> check_cycle({0: [1], 1: [2], 2: [0]})
    True
    >>> check_cycle({0: [1], 1: [2], 2: []})
    False
    """
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Returns True if a back edge (and hence a cycle) is reachable from `vertex`."""
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_CORRECT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
README_CORRECT_FOUR_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_EMPTY_YAML = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
README_NO_YAML = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
README_INCORRECT_YAML = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
README_MISSING_TEXT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
README_NONE_SUBSECTION = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
README_MISSING_SUBSECTION = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
README_MISSING_CONTENT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
README_MISSING_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
README_MULTIPLE_WRONG_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
README_WRONG_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
README_MULTIPLE_SAME_HEADING_1 = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    """Check that valid READMEs parse into the expected structure."""
    # `example_yaml_structure` is assumed to be defined earlier in this test module.
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    """Check that malformed READMEs raise the expected validation error."""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    """Check that duplicated headings raise a parsing error at load time."""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    """Check that parsing errors can be suppressed."""
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    """Round-trip a README through a file on disk."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    """Check validation errors when reading a README from a file."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    """Check parsing errors when reading a README from a file."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    """Check that file-based parsing errors can be suppressed."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
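

# For context, a minimal sketch of the kind of structural check these fixtures
# exercise (illustration only -- not the `ReadMe` implementation from `datasets`):
def _sketch_readme_issues(readme_text: str) -> list:
    issues = []
    if not readme_text.lstrip().startswith("---"):
        issues.append("No YAML markers are present in the README.")
    top_headings = [line for line in readme_text.splitlines() if line.startswith("# ")]
    if len(top_headings) != 1:
        issues.append("Expected exactly one first-level heading.")
    return issues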
| 93
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
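
# A minimal sketch of the lazy-import idea behind `_LazyModule`, using only the
# standard library (PEP 562 module-level `__getattr__`). The names below are
# illustrative assumptions, not the actual transformers implementation; it is
# left commented out so it does not interfere with the `_LazyModule` wiring above.
#
#   import importlib
#
#   _ATTR_TO_MODULE = {"CTRLConfig": ".configuration_ctrl", "CTRLModel": ".modeling_ctrl"}
#
#   def __getattr__(name):
#       module = importlib.import_module(_ATTR_TO_MODULE[name], __name__)
#       return getattr(module, name)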
| 180
|
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
lowercase__ : List[str] = "sshleifer/student_marian_en_ro_6_1"
lowercase__ : List[Any] = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        """Run a short training job and sanity-check the logged eval metrics."""
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MARIAN_MODEL,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        """Check how often the 'Running training' info line is logged for each log-level setup."""
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
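        # Back-of-the-envelope check of the arithmetic above (illustrative only,
        # not executed as part of the assertions):
        #   adamw optim state: 25e6 params * 8 bytes / 2**20 ~= 190MB
        #   bnb   optim state: 25e6 params * 2 bytes / 2**20 ~=  48MB
        #   difference ~= 143MB, hence the conservative 120MB threshold below.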
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )
        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )
        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        """Compose the CLI arguments for run_translation.py and execute it."""
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()
a = "\n --do_predict\n ".split()
a = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
a = get_gpu_count()
a = get_torch_dist_unique_port()
a = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
a = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A , env=self.get_env() )
else:
a = ["run_translation.py"] + args
with patch.object(A , "argv" , A ):
main()
return output_dir
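

# A standalone sketch of the argv-composition trick used by `run_trainer` above:
# flags are written as one readable multi-line f-string and `.split()` turns them
# into the list form expected by `sys.argv` patching or subprocess launchers.
# (`_compose_argv_example` is an illustrative helper, not part of the test suite.)
def _compose_argv_example(model_name: str, max_len: int) -> list:
    return f"""
        --model_name_or_path {model_name}
        --max_source_length {max_len}
        --do_train
    """.split()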
| 180
| 1
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict:
    """Count occurrences of each uppercase letter in `message`."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    """Sort key: the first element of a (frequency, letters) pair."""
    return x[0]
def get_frequency_order(message: str) -> str:
    """Return the letters of `message` ordered from most to least frequent."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    """Score (0-12) how closely `message`'s letter frequencies match English."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
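

# A quick, self-contained illustration of the frequency-order idea above using
# collections.Counter (tie-breaking may differ from get_frequency_order):
def _frequency_order_example(message: str) -> str:
    from collections import Counter

    counts = Counter(letter for letter in message.upper() if letter in LETTERS)
    return "".join(sorted(LETTERS, key=lambda letter: -counts[letter]))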
if __name__ == "__main__":
import doctest
doctest.testmod()
| 116
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv3_fast'] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv3'] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_layoutlmv3'] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv3'] = ['LayoutLMv3FeatureExtractor']
    _import_structure['image_processing_layoutlmv3'] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349
| 0
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        },
    )

    def to_dict(self):
        """Serialize to a dict, flattening any nested `GenerationConfig`."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
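

# A minimal sketch of the `to_dict` recursion above: nested config objects are
# flattened into plain dicts so the result stays JSON-serializable.
# (`_to_dict_example` is an illustrative helper, not part of this module.)
def _to_dict_example(d: dict) -> dict:
    return {k: (v.to_dict() if hasattr(v, 'to_dict') else v) for k, v in d.items()}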
| 359
|
from __future__ import annotations
def median_of_two_arrays(numsa: list[float], numsb: list[float]) -> float:
    """Return the median of the merged contents of the two arrays."""
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
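

# Worked examples for the function above (inputs need not be pre-sorted, since
# the merged list is re-sorted):
#   median_of_two_arrays([1.0, 3.0], [2.0])      -> 2.0
#   median_of_two_arrays([1.0, 2.0], [4.0, 3.0]) -> 2.5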
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_b = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(f'The median of two arrays is: {median_of_two_arrays(array_a, array_b)}')
| 284
| 0
|
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ-Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    scheduler: VQDiffusionScheduler
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt'
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [''] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt'
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = 'pil',
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt)}""")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps)}.""" )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
                    f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                # classifier-free guidance: move the prediction away from the unconditional
                # output and toward the text-conditioned one, scaled by `guidance_scale`
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == 'pil':
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (set to -inf in log-space) all classes outside the smallest set whose
        cumulative probability reaches `truncation_rate`."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
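

# A standalone illustration of the truncation above, assuming the pipeline's
# (batch, classes, pixels) layout (`_truncate_demo` is illustrative, not pipeline API):
def _truncate_demo(truncation_rate: float = 0.8) -> torch.Tensor:
    p = torch.tensor([[[0.6], [0.3], [0.1]]])  # already sorted, for clarity
    keep_mask = p.cumsum(dim=1) < truncation_rate  # cumsum 0.6, 0.9, 1.0 -> [True, False, False]
    # shift so the class that crosses the threshold is still kept
    keep_mask = torch.cat((torch.ones_like(keep_mask[:, :1, :]), keep_mask[:, :-1, :]), dim=1)
    out = torch.log(p).clone()
    out[~keep_mask] = -torch.inf  # zero out the tail in log-space
    return out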
| 78
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is the C struct layout expected by the Win32 console API
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    """Hide the terminal cursor (Win32 console API or ANSI escape code)."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor():
    """Show the terminal cursor again."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hide():
    """Context manager that hides the cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
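

if __name__ == "__main__":
    # Illustrative demo (not part of the original utility): the cursor stays
    # hidden for a second and is restored even if the block raises.
    import time

    with hide():
        time.sleep(1)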
| 266
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    """Build a MobileViTConfig matching the requested architecture variant."""
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    """Map an original MLCVNets state-dict key to its transformers equivalent."""
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    """Rename all keys, splitting fused QKV projections into query/key/value."""
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
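

# Standalone illustration of the fused-QKV split performed above: one
# (3*dim, hidden) matrix is cut into query/key/value blocks along dim 0.
# (`_split_qkv_example` is an illustrative helper, not part of this script.)
def _split_qkv_example(dim: int = 4, hidden: int = 8):
    qkv = torch.arange(3 * dim * hidden, dtype=torch.float32).reshape(3 * dim, hidden)
    query, key, value = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    return query.shape, key.shape, value.shape  # each is (dim, hidden)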
def prepare_img():
    """Load a test image to verify the converted model's outputs."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original model's weights into our MobileViT structure."""
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
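    # Example invocation (script filename and paths are illustrative):
    #   python convert_mlcvnets_to_pytorch.py --mobilevit_name mobilevit_s \
    #       --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./dumped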
| 318
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    """A binary tree node holding integer data."""

    def __init__(self, data: int):
        self.data = data
        self.right: TreeNode | None = None
        self.left: TreeNode | None = None
def build_tree() -> TreeNode:
    """Interactively build a binary tree, level by level."""
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the loop above always returns
def pre_order(node: TreeNode) -> None:
    """Root -> left -> right."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Left -> root -> right."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Left -> right -> root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal printing one tree level per line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative root -> left -> right using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative left -> root -> right using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Iterative left -> right -> root using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def _A (__a = "" , __a=50 , __a="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(width - len(__a ) - 2 , 2 )
return f'{left * char} {s} {(left + extra) * char}'
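

# For non-interactive use, a small tree can also be assembled directly, e.g.:
#   root = TreeNode(1); root.left = TreeNode(2); root.right = TreeNode(3)
#   pre_order(root)  # prints: 1,2,3,
#   in_order(root)   # prints: 2,1,3,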
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
    node: TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 318
| 1
|
"""simple docstring"""
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    return sum(int(x) for x in str(factorial(num)))
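

# Worked example: solution(10) sums the digits of 10! = 3628800,
# i.e. 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.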
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 60
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
@require_torch
    def test_tokenizer_as_target_length( self ) -> None:
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding="max_length" , return_tensors="pt" )
            self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
    def test_prepare_batch_not_longer_than_maxlen( self ) -> None:
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 10_24, "I am a small frog"] , padding=True , truncation=True , return_tensors="pt" )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
    def test_special_tokens( self ) -> None:
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , return_tensors="pt" )
            targets = tokenizer(text_target=tgt_text , return_tensors="pt" )
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
    def test_global_attention_mask( self ) -> None:
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text , padding=False )
            encoded_output["global_attention_mask"] = [[0] * len(x ) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output )
            self.assertSequenceEqual(outputs["global_attention_mask"] , expected_global_attention_mask )
    def test_pretokenized_inputs( self ) -> None:
        pass
    def test_embeded_special_tokens( self ) -> None:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    tokens_r_str , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
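    # A small sketch of how the toy vocabulary written in setUp() tokenizes text
    # (illustrative only; this mirrors what the mixin's common tests exercise):
    # tok = LEDTokenizer(self.vocab_file, self.merges_file, unk_token="<unk>")
    # tok.tokenize(" lower")  # -> ["\u0120low", "er"] via the merges "\u0120 l", "\u0120l o", "\u0120lo w", "e r"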
| 87
| 0
|
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
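# A minimal usage sketch of the helpers re-exported above (assumes this module
# is importable as `datasets.utils`; illustrative only):
# from datasets.utils import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
# disable_progress_bar()
# assert not is_progress_bar_enabled()
# enable_progress_bar()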
| 262
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __A ( PretrainedConfig ):
    model_type = """vit_msn"""
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1E-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
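# A minimal usage sketch for the configuration above (assumes it is exposed as
# ViTMSNConfig in transformers; the override values are illustrative):
# from transformers import ViTMSNConfig, ViTMSNModel
# configuration = ViTMSNConfig(image_size=224, patch_size=16)
# model = ViTMSNModel(configuration)  # randomly initialised ViT-MSN encoder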
| 262
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
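# What the lazy pattern above buys the caller (illustrative only; assumes this
# file is transformers/models/mra/__init__.py):
# import transformers.models.mra as mra  # cheap: only the import table is built
# mra.MraConfig                          # resolves from configuration_mra
# mra.MraModel                           # first access triggers the torch-heavy import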
| 180
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def UpperCAmelCase ( self ) -> Tuple:
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
        tokens = tokenizer(**self.metas )["""input_ids"""]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCAmelCase ( self ) -> List[str]:
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
        tokens = tokenizer(**self.metas )["""input_ids"""]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
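    # A compact sketch of the call pattern exercised above (checkpoint name as in
    # the tests; the metadata keys mirror self.metas):
    # tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
    # tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="...")["input_ids"]
    # len(tokens)  # -> 3 tensors, one per prior level of the model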
| 180
| 1
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class DepthEstimationPipeline(Pipeline ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , '''vision''' )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        return {}, {}, {}
    def preprocess( self , image ):
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs ):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype('''uint8''' )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
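# A minimal usage sketch for the pipeline class above (model name illustrative;
# assumes the class is registered under the "depth-estimation" task as in
# transformers):
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# outputs = depth_estimator("example.jpg")
# outputs["depth"].save("example_depth.png")  # PIL image built in postprocess()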
| 366
|
'''simple docstring'''
import pprint
import requests
API_ENDPOINT_URL = """https://zenquotes.io/api"""
def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 4
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings if they differ in exactly one position, else return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """Repeatedly combine minterms until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # the two terms merge, so neither is a prime implicant yet
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm into a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if a prime implicant covers a minterm (count is the number of '_' wildcards)."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants from the chart, then greedily cover the rest."""
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one implicant marks that implicant as essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the prime implicant coverage chart."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
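# A non-interactive sketch of the same flow (integer minterms 0, 1, 2 over two
# variables are illustrative; main() above reads float minterms from stdin):
# binary = decimal_to_binary(2, [0, 1, 2])            # ['00', '01', '10']
# prime_implicants = check(binary)
# chart = prime_implicant_chart(prime_implicants, binary)
# essential = selection(chart, prime_implicants)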
| 109
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8' )
    ciphertext = [int(number) for number in data.strip().split(',' )]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
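# A tiny self-check of try_key() (key and plaintext are illustrative):
# key = (ord("a"), ord("b"), ord("c"))
# plaintext = "Attack at dawn"
# cipher = [ord(ch) ^ k for ch, k in zip(plaintext, cycle(key))]
# assert try_key(cipher, key) == plaintext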
| 284
| 0
|
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode :
    """simple docstring"""
    def __init__( self ,start ,end ,val ,left=None ,right=None ):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
    def __repr__( self ):
        return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class SegmentTree :
    """simple docstring"""
    def __init__( self ,collection: Sequence ,function ):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 ,len(collection ) - 1 )
    def update( self ,i ,val ):
        self._update_tree(self.root ,i ,val )
    def query_range( self ,i ,j ):
        return self._query_range(self.root ,i ,j )
    def _build_tree( self ,start ,end ):
        if start == end:
            return SegmentTreeNode(start ,end ,self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start ,mid )
        right = self._build_tree(mid + 1 ,end )
        return SegmentTreeNode(start ,end ,self.fn(left.val ,right.val ) ,left ,right )
    def _update_tree( self ,node ,i ,val ):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left ,i ,val )
        else:
            self._update_tree(node.right ,i ,val )
        node.val = self.fn(node.left.val ,node.right.val )
    def _query_range( self ,node ,i ,j ):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left ,i ,j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left ,i ,node.mid ) ,self._query_range(node.right ,node.mid + 1 ,j ) ,)
        else:
            # range in right child tree
            return self._query_range(node.right ,i ,j )
    def traverse( self ):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 5_0)
    arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
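# The tree works with any associative reducer, e.g. string concatenation
# (illustrative only):
# tree = SegmentTree(["a", "b", "c", "d"], lambda x, y: x + y)
# tree.query_range(1, 3)  # -> "bcd", answered in O(log n)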
| 367
|
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r'''#.*''' , '''''' , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = '''\n'''.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('''utf-8''' )
    return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
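# A small sketch of how the table above is typically consulted to infer a
# builder module from a file extension (illustrative only):
# suffix = ".jsonl"  # e.g. from os.path.splitext("train.jsonl")[1]
# module_name, default_kwargs = _EXTENSION_TO_MODULE.get(suffix, (None, {}))
# module_name  # -> "json"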
| 74
| 0
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''Hello, World!'''
SAMPLE_LANGUAGE = '''en_XX'''
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path , pytorch_dump_folder_path , classification_head ) -> None:
    '''simple docstring'''
    data_dir = Path('''data_bin''' )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(data_dir ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
    xmod.eval()  # disable dropout
    print(xmod )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        config.num_labels = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our X-MOD config:''' , config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ : Dict = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase_ : str = xmod_sent_encoder.embed_positions.weight
lowerCamelCase_ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase_ : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase_ : Dict = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ : List[str] = model.roberta.encoder.layer[i]
lowerCamelCase_ : int = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase_ : Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
lowerCamelCase_ : List[Any] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase_ : Optional[int] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn.k_proj.weight
lowerCamelCase_ : Tuple = xmod_layer.self_attn.k_proj.bias
lowerCamelCase_ : str = xmod_layer.self_attn.v_proj.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
lowerCamelCase_ : List[str] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase_ : int = xmod_layer.self_attn.out_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase_ : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase_ : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowerCamelCase_ : Tuple = xmod_layer.fca.weight
lowerCamelCase_ : str = xmod_layer.fca.bias
# output
lowerCamelCase_ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowerCamelCase_ : Optional[int] = xmod_layer.fca.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.fca.bias
lowerCamelCase_ : Dict = xmod_layer.final_layer_norm.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ : Optional[int] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase_ : List[str] = bert_output.adapter_modules[lang_code]
lowerCamelCase_ : Optional[Any] = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ : List[Any] = from_adapter.fca.weight
lowerCamelCase_ : str = from_adapter.fca.bias
lowerCamelCase_ : Union[str, Any] = from_adapter.fca.weight
lowerCamelCase_ : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ : str = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ : Optional[int] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase_ : List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase_ : str = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase_ : List[str] = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ : Dict = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ : List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase_ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads['''mnli'''](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F"""max_absolute_diff = {max_absolute_diff}""" )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
    if not success:
        raise Exception('''Something went wRoNg''' )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowercase : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
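# Example invocation (script name and paths are hypothetical):
# python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#     --xmod_checkpoint_path /path/to/xmod/model.pt \
#     --pytorch_dump_folder_path ./xmod-base \
#     --classification_head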
| 318
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''Hello, World!'''
SAMPLE_LANGUAGE = '''en_XX'''
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path , pytorch_dump_folder_path , classification_head ) -> None:
    '''simple docstring'''
    data_dir = Path('''data_bin''' )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(data_dir ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
    xmod.eval()  # disable dropout
    print(xmod )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        config.num_labels = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our X-MOD config:''' , config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ : Dict = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase_ : str = xmod_sent_encoder.embed_positions.weight
lowerCamelCase_ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase_ : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase_ : Dict = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ : List[str] = model.roberta.encoder.layer[i]
lowerCamelCase_ : int = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase_ : Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
lowerCamelCase_ : List[Any] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase_ : Optional[int] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn.k_proj.weight
lowerCamelCase_ : Tuple = xmod_layer.self_attn.k_proj.bias
lowerCamelCase_ : str = xmod_layer.self_attn.v_proj.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
lowerCamelCase_ : List[str] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase_ : int = xmod_layer.self_attn.out_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase_ : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase_ : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowerCamelCase_ : Tuple = xmod_layer.fca.weight
lowerCamelCase_ : str = xmod_layer.fca.bias
# output
lowerCamelCase_ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowerCamelCase_ : Optional[int] = xmod_layer.fca.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.fca.bias
lowerCamelCase_ : Dict = xmod_layer.final_layer_norm.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ : Optional[int] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase_ : List[str] = bert_output.adapter_modules[lang_code]
lowerCamelCase_ : Optional[Any] = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ : List[Any] = from_adapter.fca.weight
lowerCamelCase_ : str = from_adapter.fca.bias
lowerCamelCase_ : Union[str, Any] = from_adapter.fca.weight
lowerCamelCase_ : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ : str = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ : Optional[int] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase_ : List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase_ : str = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase_ : List[str] = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ : Dict = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ : List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase_ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads['''mnli'''](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F"""max_absolute_diff = {max_absolute_diff}""" )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
    if not success:
        raise Exception('''Something went wRoNg''' )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowercase : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 318
| 1
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=False , vocab_size=1_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        config = EsmConfig(
            vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=True , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , )
        return config
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmForProteinFolding(config=config ).float()
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) )
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp( self ):
        self.model_tester = EsmFoldModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip("Does not support attention outputs" )
def snake_case_ ( self : Dict ):
pass
@unittest.skip
def snake_case_ ( self : str ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def snake_case_ ( self : Union[str, Any] ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def snake_case_ ( self : Optional[int] ):
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def snake_case_ ( self : Any ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case_ ( self : List[Any] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case_ ( self : Union[str, Any] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case_ ( self : List[str] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case_ ( self : Optional[Any] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case_ ( self : Tuple ):
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def snake_case_ ( self : str ):
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def snake_case_ ( self : Union[str, Any] ):
pass
@unittest.skip("ESMFold only has one output format." )
def snake_case_ ( self : Optional[Any] ):
pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" )
def snake_case_ ( self : Union[str, Any] ):
pass
@unittest.skip("ESMFold does not support input chunking." )
def snake_case_ ( self : Optional[int] ):
pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." )
def snake_case_ ( self : Optional[Any] ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def snake_case_ ( self : List[Any] ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def snake_case_ ( self : Union[str, Any] ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def snake_case_ ( self : Dict ):
pass
@unittest.skip("ESMFold doesn't support data parallel." )
def snake_case_ ( self : Any ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case_ ( self : List[str] ):
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
    @slow
    def test_inference_protein_folding( self ):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
        position_outputs = model(input_ids )["positions"]
        expected_slice = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.float32 )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1e-4 ) )
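        # A minimal end-to-end sketch mirroring the integration test above (the
        # sequence is illustrative; tokenizer choice assumes the esmfold_v1 repo):
        # from transformers import AutoTokenizer
        # tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
        # inputs = tokenizer(["MLKNV"], return_tensors="pt", add_special_tokens=False)
        # positions = model(**inputs)["positions"]  # (8, batch, seq_len, 14, 3)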
| 202
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}
    def setUp( self ):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        with open(self.emoji_file , "w" ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text
    def get_clean_sequence( self , tokenizer ):
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def snake_case_ ( self : Any ):
pass # TODO add if relevant
def snake_case_ ( self : Union[str, Any] ):
pass # TODO add if relevant
def snake_case_ ( self : int ):
pass # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        ids = tokenizer.encode(input_text)
        output_text = tokenizer.decode(ids)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        ids_1 = tokenizer.encode(prefix_text + input_text)
        ids_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        ids_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        decoded_1 = tokenizer.decode(ids_1)
        decoded_2 = tokenizer.decode(ids_2)
        decoded_3 = tokenizer.decode(ids_3)
        self.assertEqual(decoded_1, expected_text)
        self.assertEqual(decoded_2, expected_text)
        self.assertEqual(decoded_3, expected_text)
    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token
    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
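

# Hedged usage sketch (illustration only, not part of the test suite): GPTSAN-japanese
# is a hybrid prefix-LM, so the tokenizer accepts a `prefix_text` argument and marks
# the prefix span with token_type_id 1 and the input span with 0, as the slow tests
# above assert. Assumes the "Tanrei/GPTSAN-japanese" checkpoint is reachable.
if __name__ == "__main__":
    tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
    encoded = tokenizer("こんばんは、世界。", prefix_text="こんにちは、世界。")
    print(encoded.input_ids)
    print(encoded.token_type_ids)  # 1s over the prefix span, then 0s over the input span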
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
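

# Hedged usage sketch (illustration only): BioGPT's `build_inputs_with_special_tokens`
# prepends the `</s>` token (id 2) to a single sequence and joins a pair as
# [2] + seq_a + [2] + seq_b, which is exactly what the slow test above asserts.
if __name__ == "__main__":
    tok = BioGptTokenizer.from_pretrained("microsoft/biogpt")
    ids = tok.encode("sequence builders", add_special_tokens=False)
    print(tok.build_inputs_with_special_tokens(ids))  # first id is 2 (</s>)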
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v) -> bool:
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    # Map each choice to its string form so argparse can parse the string back to the original value.
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """Subclass of `argparse.ArgumentParser` that uses type hints on dataclasses to generate arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types, **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
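

# Hedged usage sketch (illustration, not part of the module): a dataclass's fields
# become CLI flags, booleans that default to True get a paired --no_<name> complement,
# and parse_args_into_dataclasses returns one instance per dataclass. `TrainingConfig`
# below is a hypothetical example class, not a real transformers dataclass.
if __name__ == "__main__":
    @dataclasses.dataclass
    class TrainingConfig:
        learning_rate: float = 3e-4
        do_eval: bool = True  # exposes both --do_eval and --no_do_eval

    parser = HfArgumentParser(TrainingConfig)
    (config,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--no_do_eval"])
    print(config)  # TrainingConfig(learning_rate=0.0001, do_eval=False)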
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
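

# Hedged usage sketch (illustration only): loading the PyTorch `roberta-base` checkpoint
# into a Flax model with `from_pt=True`, mirroring the slow test above. Assumes flax and
# the transformers Flax extras are installed.
if __name__ == "__main__":
    model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
    outputs = model(np.ones((1, 1), dtype="i4"))
    print(outputs.last_hidden_state.shape)  # (1, 1, 768)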
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n  author    = {Banerjee, Satanjeev  and Lavie, Alon},\n  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n  month     = jun,\n  year      = {2005},\n  address   = {Ann Arbor, Michigan},\n  publisher = {Association for Computational Linguistics},\n  url       = {https://www.aclweb.org/anthology/W05-0909},\n  pages     = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        # newer nltk versions tokenize internally-untokenized inputs themselves
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_a = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
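

# Hedged usage sketch (illustration only; assumes a CUDA GPU and that the
# "shi-labs/versatile-diffusion" checkpoint can be downloaded): the same pipeline
# object exposes the three task heads exercised above — dual_guided (text + image),
# text_to_image, and image_variation.
if __name__ == "__main__":
    pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
    pipe.to("cuda")
    image = pipe.text_to_image(prompt="a red car", num_inference_steps=25).images[0]
    image.save("red_car.png")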
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1_000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
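
# Hedged usage sketch: a typical shell invocation of this conversion script, assuming it
# is saved as convert_vit_hybrid_timm_to_pytorch.py (hypothetical filename) and that timm
# can download the "vit_base_r50_s16_384" weights:
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384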
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
a : Tuple = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
a : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 
643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
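For context, the `image` and `expected_words` fixtures used above are created earlier in the original test file. A minimal sketch of that setup, under the assumption that the upstream transformers document-QA fixtures are used (the dataset name and field are assumptions, not part of this excerpt):

# Hypothetical fixture setup (illustrative; lives earlier in the real test file).
from datasets import load_dataset
from PIL import Image

ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
image = Image.open(ds[0]["file"]).convert("RGB")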
| 226
| 1
|
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE : int = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def lowercase ( _snake_case : list[list[int]] , _snake_case : list[int] , _snake_case : list[int] , _snake_case : int , _snake_case : list[list[int]] , ) ->List[str]:
"""simple docstring"""
__snake_case : Dict = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the reference grid
__snake_case : str = 1
__snake_case : List[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : str = init[1]
__snake_case : List[str] = 0
__snake_case : Union[str, Any] = g + heuristic[x][y] # cost from starting cell to destination cell
__snake_case : Optional[Any] = [[f, g, x, y]]
__snake_case : Tuple = False # flag that is set when search is complete
__snake_case : Dict = False # flag set if we can't find expand
while not found and not resign:
if len(snake_case__ ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : str = cell.pop()
__snake_case : Tuple = next_cell[2]
__snake_case : Union[str, Any] = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Any = True
else:
for i in range(len(snake_case__ ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Dict = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[int] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : List[Any] = 1
__snake_case : int = i
__snake_case : Dict = []
__snake_case : Tuple = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Dict = x - DIRECTIONS[action[x][y]][0]
__snake_case : Union[str, Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : List[str] = xa
__snake_case : Union[str, Any] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(snake_case__ ) ):
path.append(invpath[len(snake_case__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
SCREAMING_SNAKE_CASE : Optional[int] = [0, 0]
# all coordinates are given in format [y,x]
SCREAMING_SNAKE_CASE : List[Any] = [len(grid) - 1, len(grid[0]) - 1]
SCREAMING_SNAKE_CASE : Dict = 1
# the cost map which pushes the path closer to the goal
SCREAMING_SNAKE_CASE : Optional[int] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
SCREAMING_SNAKE_CASE : str = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
SCREAMING_SNAKE_CASE : Union[str, Any] = 99
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
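The open list above is re-sorted and reversed on every iteration just to pop the cheapest cell. A binary heap performs the same selection in O(log n) per operation; a minimal, self-contained sketch of that alternative (illustrative only, not part of the file above):

import heapq

def pick_cheapest(cells: list[list[int]]) -> list[int]:
    # Cells are [f, g, x, y]; a min-heap ordered on f returns the same cell
    # that sort() + reverse() + pop() selects in search() above.
    heapq.heapify(cells)
    return heapq.heappop(cells)

print(pick_cheapest([[9, 2, 0, 1], [4, 1, 1, 0], [7, 2, 1, 1]]))  # [4, 1, 1, 0]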
| 102
|
"""simple docstring"""
import argparse
import struct
import unittest
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Tuple ,A_ : bytes ) -> None:
A = data
# Initialize hash values
A = [
0X6_A_0_9_E_6_6_7,
0XB_B_6_7_A_E_8_5,
0X3_C_6_E_F_3_7_2,
0XA_5_4_F_F_5_3_A,
0X5_1_0_E_5_2_7_F,
0X9_B_0_5_6_8_8_C,
0X1_F_8_3_D_9_A_B,
0X5_B_E_0_C_D_1_9,
]
# Initialize round constants
A = [
0X4_2_8_A_2_F_9_8,
0X7_1_3_7_4_4_9_1,
0XB_5_C_0_F_B_C_F,
0XE_9_B_5_D_B_A_5,
0X3_9_5_6_C_2_5_B,
0X5_9_F_1_1_1_F_1,
0X9_2_3_F_8_2_A_4,
0XA_B_1_C_5_E_D_5,
0XD_8_0_7_A_A_9_8,
0X1_2_8_3_5_B_0_1,
0X2_4_3_1_8_5_B_E,
0X5_5_0_C_7_D_C_3,
0X7_2_B_E_5_D_7_4,
0X8_0_D_E_B_1_F_E,
0X9_B_D_C_0_6_A_7,
0XC_1_9_B_F_1_7_4,
0XE_4_9_B_6_9_C_1,
0XE_F_B_E_4_7_8_6,
0X0_F_C_1_9_D_C_6,
0X2_4_0_C_A_1_C_C,
0X2_D_E_9_2_C_6_F,
0X4_A_7_4_8_4_A_A,
0X5_C_B_0_A_9_D_C,
0X7_6_F_9_8_8_D_A,
0X9_8_3_E_5_1_5_2,
0XA_8_3_1_C_6_6_D,
0XB_0_0_3_2_7_C_8,
0XB_F_5_9_7_F_C_7,
0XC_6_E_0_0_B_F_3,
0XD_5_A_7_9_1_4_7,
0X0_6_C_A_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_B_7_0_A_8_5,
0X2_E_1_B_2_1_3_8,
0X4_D_2_C_6_D_F_C,
0X5_3_3_8_0_D_1_3,
0X6_5_0_A_7_3_5_4,
0X7_6_6_A_0_A_B_B,
0X8_1_C_2_C_9_2_E,
0X9_2_7_2_2_C_8_5,
0XA_2_B_F_E_8_A_1,
0XA_8_1_A_6_6_4_B,
0XC_2_4_B_8_B_7_0,
0XC_7_6_C_5_1_A_3,
0XD_1_9_2_E_8_1_9,
0XD_6_9_9_0_6_2_4,
0XF_4_0_E_3_5_8_5,
0X1_0_6_A_A_0_7_0,
0X1_9_A_4_C_1_1_6,
0X1_E_3_7_6_C_0_8,
0X2_7_4_8_7_7_4_C,
0X3_4_B_0_B_C_B_5,
0X3_9_1_C_0_C_B_3,
0X4_E_D_8_A_A_4_A,
0X5_B_9_C_C_A_4_F,
0X6_8_2_E_6_F_F_3,
0X7_4_8_F_8_2_E_E,
0X7_8_A_5_6_3_6_F,
0X8_4_C_8_7_8_1_4,
0X8_C_C_7_0_2_0_8,
0X9_0_B_E_F_F_F_A,
0XA_4_5_0_6_C_E_B,
0XB_E_F_9_A_3_F_7,
0XC_6_7_1_7_8_F_2,
]
A = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : bytes ) -> bytes:
A = B'\x80' + (B'\x00' * (63 - (len(A_ ) + 8) % 64))
A = struct.pack('>Q' ,(len(A_ ) * 8) )
return data + padding + big_endian_integer
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
# Convert into blocks of 64 bytes
A = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A = list(struct.unpack('>16L' ,A_ ) )
# add 48 0-ed integers
words += [0] * 48
A , A , A , A , A , A , A , A = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
A = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
A = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
A = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_0_0_0_0_0_0_0_0
# Compression
A = self.ror(A_ ,6 ) ^ self.ror(A_ ,11 ) ^ self.ror(A_ ,25 )
A = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g)
A = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0_0_0_0_0_0_0_0
A = self.ror(A_ ,2 ) ^ self.ror(A_ ,13 ) ^ self.ror(A_ ,22 )
A = (a & b) ^ (a & c) ^ (b & c)
A = (sa + maj) % 0X1_0_0_0_0_0_0_0_0
A , A , A , A , A , A , A , A = (
g,
f,
e,
((d + tempa) % 0X1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0),
)
A = [a, b, c, d, e, f, g, h]
# Modify final values
A = [
((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
A = ''.join([hex(A_ )[2:].zfill(8 ) for value in self.hashes] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : int ) -> int:
return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> None:
import hashlib
A = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(A_ ).hash ,hashlib.shaaaa(A_ ).hexdigest() )
def _snake_case ( ):
import doctest
doctest.testmod()
A = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
A = parser.parse_args()
A = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
A = f.read()
else:
A = bytes(snake_case__ , 'utf-8' )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
main()
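A quick cross-check of the class above against the reference implementation in the standard library (a sketch; it assumes the SHA256 class from this file is in scope):

import hashlib

for msg in (b"", b"abc", b"Hello World!! Welcome to Cryptography"):
    assert SHA256(msg).hash == hashlib.sha256(msg).hexdigest()
print("all digests match the hashlib reference")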
| 74
| 0
|
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    """Output of FlaxUNet2DConditionModel: the denoised sample."""

    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """A conditional 2D UNet that takes a noisy sample, a timestep and encoder
    hidden states, and returns a sample-shaped output."""

    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str, ...] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads`"
                " because of a naming issue as described in"
                " https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing"
                " `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
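A minimal smoke-test sketch for the module above (assumptions: the class is importable together with its diffusers block dependencies, and the small channel widths below, chosen only to keep initialization cheap, stay multiples of the 32 GroupNorm groups):

import jax
import jax.numpy as jnp

model = FlaxUNet2DConditionModel(
    sample_size=8,
    block_out_channels=(32, 64, 64, 64),  # assumption: tiny widths for a fast check
    cross_attention_dim=16,
)
params = model.init_weights(jax.random.PRNGKey(0))

sample = jnp.zeros((1, 4, 8, 8))                # (batch, in_channels, height, width)
timesteps = jnp.ones((1,), dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 77, 16))  # (batch, tokens, cross_attention_dim)

out = model.apply({"params": params}, sample, timesteps, encoder_hidden_states)
print(out.sample.shape)                         # expected: (1, 4, 8, 8)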
| 119
|
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    """Configuration class for an XLM-RoBERTa-XL model."""

    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
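A short usage sketch for the two classes above (assuming they are importable, e.g. from transformers; the reduced sizes are illustrative, not defaults):

config = XLMRobertaXLConfig(num_hidden_layers=4, hidden_size=256)
print(config.model_type)   # "xlm-roberta-xl"

onnx_config = XLMRobertaXLOnnxConfig(config, task="default")
print(onnx_config.inputs)  # OrderedDict([('input_ids', {...}), ('attention_mask', {...})])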
| 119
| 1
|