| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82 to 53.2k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls) -> "KarrasVeSchedulerState":
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self) -> bool:
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self) -> KarrasVeSchedulerState:
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: jnp.ndarray,
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
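
# Illustrative usage sketch (an editorial addition, not part of the original
# module): a bare-bones sampling loop driving the scheduler above, mirroring
# the structure of the KarrasVe pipeline. `dummy_denoiser` is a hypothetical
# stand-in for a trained model; because of the relative imports, this only
# runs inside the diffusers package.
if __name__ == "__main__":
    def dummy_denoiser(sample, sigma):
        # a real pipeline would call a trained UNet here
        return -sample / (1 + sigma**2) ** 0.5

    scheduler = FlaxKarrasVeScheduler()
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=50)
    key = random.PRNGKey(0)
    sample = random.normal(key, shape=(1, 32, 32, 3)) * state.schedule[0]
    for t in state.timesteps:
        sigma = state.schedule[t]
        sigma_prev = state.schedule[t - 1] if t > 0 else 0.0
        sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
        model_output = dummy_denoiser(sample_hat, sigma_hat)
        sample = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat).prev_sample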
[code_codestyle: 463]
import inspect
import unittest

from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
[style_context_codestyle: 463 | label: 1]
from __future__ import annotations

import unittest

import numpy as np

from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel


def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}


@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)


@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size


@require_sentencepiece
@require_tf
class OPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))


@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))


@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
[code_codestyle: 594]
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
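
# Illustrative usage (an editorial addition, not part of the original file):
# `IdentityFilter` is a hypothetical all-pass filter satisfying the FilterType
# protocol; its impulse response is the unit impulse, so the magnitude plot
# should be a flat line at 0 dB.
class IdentityFilter:
    def process(self, sample: float) -> float:
        # output equals input: |H(f)| = 1 at every frequency
        return sample


if __name__ == "__main__":
    show_frequency_response(IdentityFilter(), samplerate=48000)
    show_phase_response(IdentityFilter(), samplerate=48000)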
[style_context_codestyle: 594 | label: 1]
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
[code_codestyle: 84]
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # bypass the frozen dataclass to swap in the aligned label schema
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
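
# Hedged usage sketch (editorial addition, not part of the original module):
# aligning the template with a dataset's features. The column names "review"
# and "sentiment" are hypothetical; because of the relative imports, this only
# runs inside the datasets package.
if __name__ == "__main__":
    features = Features({"review": Value("string"), "sentiment": ClassLabel(names=["neg", "pos"])})
    task = TextClassification(text_column="review", label_column="sentiment")
    aligned = task.align_with_features(features)
    print(aligned.label_schema["labels"].names)  # ['neg', 'pos']
    print(task.column_mapping)  # {'review': 'text', 'sentiment': 'labels'}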
[style_context_codestyle: 6 | label: 0]
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Probabilistic (Miller-Rabin style) primality check for large numbers."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2 ** exp)

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
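
# For intuition (editorial addition, not part of the original module): the
# while-loop above factors n - 1 into d * 2**exp with d odd, which is the
# decomposition Miller-Rabin relies on.
def decompose(n: int) -> tuple[int, int]:
    """
    Return (d, exp) such that n - 1 == d * 2**exp and d is odd.

    >>> decompose(13)  # 12 == 3 * 2**2
    (3, 2)
    >>> decompose(97)  # 96 == 3 * 2**5
    (3, 5)
    """
    d, exp = n - 1, 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    return d, exp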
[code_codestyle: 318]
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
[style_context_codestyle: 318 | label: 1]
# Usage:
# ./gen-card-allenai-wmt16.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
[code_codestyle: 467]
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
[style_context_codestyle: 562 | label: 0]
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
[code_codestyle: 253]
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
[style_context_codestyle: 253 | label: 1]
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list using the cocktail shaker (bidirectional bubble) sort.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
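
# Quick property check (editorial addition, not part of the original file):
# the shaker sort above should agree with Python's built-in sorted() on
# random input.
if __name__ == "__main__":
    import random

    data = [random.randint(-100, 100) for _ in range(50)]
    assert cocktail_shaker_sort(data[:]) == sorted(data)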
[code_codestyle: 80]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
[style_context_codestyle: 498 | label: 0]
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase__( UpperCamelCase_ ):
'''simple docstring'''
def __get__( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str]=None) -> int:
"""simple docstring"""
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute')
lowercase__ = '__cached_' + self.fget.__name__
lowercase__ = getattr(__a , __a , __a)
if cached is None:
lowercase__ = self.fget(__a)
setattr(__a , __a , __a)
return cached
def _lowerCAmelCase ( A__ ):
lowercase__ = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F'''invalid truth value {val!r}''' )
def _lowerCAmelCase ( A__ ):
if is_torch_fx_proxy(A__ ):
return True
if is_torch_available():
import torch
if isinstance(A__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(A__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(A__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(A__ , np.ndarray )
def _lowerCAmelCase ( A__ ):
return isinstance(A__ , np.ndarray )
def _lowerCAmelCase ( A__ ):
return _is_numpy(A__ )
def _lowerCAmelCase ( A__ ):
import torch
return isinstance(A__ , torch.Tensor )
def _lowerCAmelCase ( A__ ):
return False if not is_torch_available() else _is_torch(A__ )
def _lowerCAmelCase ( A__ ):
import torch
return isinstance(A__ , torch.device )
def _lowerCAmelCase ( A__ ):
return False if not is_torch_available() else _is_torch_device(A__ )
def _lowerCAmelCase ( A__ ):
import torch
if isinstance(A__ , A__ ):
if hasattr(A__ , A__ ):
lowercase__ = getattr(A__ , A__ )
else:
return False
return isinstance(A__ , torch.dtype )
def _lowerCAmelCase ( A__ ):
return False if not is_torch_available() else _is_torch_dtype(A__ )
def _lowerCAmelCase ( A__ ):
import tensorflow as tf
return isinstance(A__ , tf.Tensor )
def _lowerCAmelCase ( A__ ):
return False if not is_tf_available() else _is_tensorflow(A__ )
def _lowerCAmelCase ( A__ ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(A__ , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(A__ )
return type(A__ ) == tf.Tensor
def _lowerCAmelCase ( A__ ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(A__ )
def _lowerCAmelCase ( A__ ):
import jax.numpy as jnp # noqa: F811
return isinstance(A__ , jnp.ndarray )
def _lowerCAmelCase ( A__ ):
return False if not is_flax_available() else _is_jax(A__ )
def _lowerCAmelCase ( A__ ):
if isinstance(A__ , (dict, UserDict) ):
return {k: to_py_obj(A__ ) for k, v in obj.items()}
elif isinstance(A__ , (list, tuple) ):
return [to_py_obj(A__ ) for o in obj]
elif is_tf_tensor(A__ ):
return obj.numpy().tolist()
elif is_torch_tensor(A__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(A__ ):
return np.asarray(A__ ).tolist()
elif isinstance(A__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def _lowerCAmelCase ( A__ ):
if isinstance(A__ , (dict, UserDict) ):
return {k: to_numpy(A__ ) for k, v in obj.items()}
elif isinstance(A__ , (list, tuple) ):
return np.array(A__ )
elif is_tf_tensor(A__ ):
return obj.numpy()
elif is_torch_tensor(A__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(A__ ):
return np.asarray(A__ )
else:
return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
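# A minimal usage sketch (the `_ToyOutput` dataclass below is illustrative and
# not part of this module): ModelOutput subclasses are dataclasses whose
# non-None fields are reachable by attribute, by key, and by index.
from dataclasses import dataclass


@dataclass
class _ToyOutput(ModelOutput):
    logits: np.ndarray = None
    hidden_states: np.ndarray = None


def _demo_model_output():
    out = _ToyOutput(logits=np.zeros((1, 2)))
    assert out.logits is out["logits"] is out[0]
    assert len(out.to_tuple()) == 1  # hidden_states is None, so it is skipped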
class ExplicitEnum(str, Enum):
    """Enum with more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """Possible values for the `padding` argument in [`PreTrainedTokenizerBase.__call__`]."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
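# A minimal usage sketch (hypothetical helper, not part of the original
# module): ContextManagers enters every manager in the list on __enter__ and
# unwinds them together, LIFO, on __exit__.
def _demo_context_managers():
    import contextlib

    order = []

    @contextlib.contextmanager
    def tag(name):
        order.append(f"enter {name}")
        yield
        order.append(f"exit {name}")

    with ContextManagers([tag("a"), tag("b")]):
        pass
    assert order == ["enter a", "enter b", "exit b", "exit a"]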
def can_return_loss(model_class):
    """
    Check if a given model can return loss.

    Args:
        model_class (`type`): The class of the model.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """
    Find the labels used by a given model.

    Args:
        model_class (`type`): The class of the model.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d, parent_key="", delimiter="."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, dict):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
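# A minimal usage sketch (hypothetical helper, not part of the original
# module): nested keys are joined with the delimiter.
def _demo_flatten_dict():
    nested = {"a": 1, "b": {"c": 2, "d": {"e": 3}}}
    assert flatten_dict(nested) == {"a": 1, "b.c": 2, "b.d.e": 3}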
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir=False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """
    Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
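# A minimal sanity check for the framework-agnostic helpers above (a
# hypothetical helper, not part of the original module; exercised with NumPy,
# although the torch/tf/jax branches dispatch identically):
def _demo_tensor_ops():
    x = np.zeros((2, 1, 3))
    assert transpose(x).shape == (3, 1, 2)
    assert reshape(x, (3, 2)).shape == (3, 2)
    assert squeeze(x, axis=1).shape == (2, 3)
    assert expand_dims(x, 0).shape == (1, 2, 1, 3)
    assert tensor_size(x) == 6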
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    # Only raise once no base class matched any framework
    raise TypeError(f"Could not infer framework from class {model_class}.")
import argparse
import hashlib  # hashlib is only used inside the test function
import struct


class SHA1Hash:
    """Class to contain the entire pipeline for SHA-1 hashing a bytestring."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer `n` by `b` bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the data so its length is 56 (mod 64), then append the 64-bit message length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded data into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into eighty 32-bit words."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the 80-round compression over every block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    """Provides option 'string' or 'file' to take input and prints the calculated SHA-1 hash."""
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
"""Convert a fairseq UniSpeechSat checkpoint to the Hugging Face Transformers format."""

import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
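# Example invocation (all paths below are placeholders, and the script name is
# whatever this file is saved as):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf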
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Downloads and caches the prompt from a repo and returns its content (if necessary)."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> Dict:
'''simple docstring'''
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> Union[str, Any]:
'''simple docstring'''
lowercase = 0
lowercase = number
while duplicate > 0:
lowercase = divmod(_lowerCamelCase , 1_0 )
fact_sum += factorial(_lowerCamelCase )
return fact_sum == number
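# A quick sanity check (hypothetical helper, not in the original script):
# 145 is a Krishnamurthy number because 1! + 4! + 5! = 1 + 24 + 120 = 145.
def _demo_krishnamurthy():
    assert krishnamurthy(145)
    assert not krishnamurthy(123)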
if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
    )
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
"""Marian model configuration"""

from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    """Configuration class for a Marian model; mirrors the BART-style encoder/decoder hyperparameters."""

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
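# A hedged usage sketch (comment only; the model name below is illustrative):
#
#     config = MarianConfig.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
#     list(onnx_config.inputs)  # dynamic-axis names for input_ids, attention_mask, ...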
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The split sizes don't match the expected split sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
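# A minimal, hypothetical sanity check (not part of the original module):
# identical expected and recorded checksum dicts verify cleanly, while any
# drift raises NonMatchingChecksumError.
def _demo_verify_checksums():
    recorded = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "abc"}}
    verify_checksums(recorded, recorded)  # passes and logs success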
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase="pt" ) -> List[str]:
'''simple docstring'''
lowercase_ = {"""add_prefix_space""": True} if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and not line.startswith(""" """ ) else {}
lowercase_ = padding_side
return tokenizer(
[line] , max_length=__lowerCAmelCase , padding="""max_length""" if pad_to_max_length else None , truncation=__lowerCAmelCase , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase , )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = input_ids.ne(__lowerCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
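# A quick sanity check (hypothetical helper, not in the original module):
# after normalization the prediction has 4 tokens and the reference 2, with 2
# in common, so precision = 0.5, recall = 1.0 and F1 = 2/3.
def _demo_f1_score():
    assert abs(f1_score("the cat sat on a mat", "a cat sat") - 2 / 3) < 1e-9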
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Any = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
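# Usage note (sketch): with the lazy module installed in sys.modules above, a plain
#     from transformers.models.xglm import XGLMConfig
# only imports `configuration_xglm` on first access; the heavy torch/tf/flax modeling
# files are loaded lazily when their symbols are actually touched.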
| 100
| 0
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 199
|
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]
        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)
        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()
        input_dict = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input_dict)[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ])
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input_dict = self.get_dummy_seed_input(seed=seed)
        with torch.no_grad():
            sample = model(**input_dict)[0]
        assert list(sample.shape) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected, atol=1e-3)
| 199
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
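# Example (illustrative sketch): round-tripping a random image through the VQ autoencoder
# above with its default config. Runs on CPU; the spatial size just needs to be compatible
# with the (single-block) down/upsampling path.
#     model = VQModel()
#     image = torch.randn(1, 3, 32, 32)
#     reconstruction = model(image).sample  # encode -> quantize -> decode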
| 711
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["image_processor", "feature_extractor"]
SCREAMING_SNAKE_CASE_ : Dict = "TvltImageProcessor"
SCREAMING_SNAKE_CASE_ : List[Any] = "TvltFeatureExtractor"
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
super().__init__(image_processor=UpperCAmelCase_ ,feature_extractor=UpperCAmelCase_ )
_lowercase : Any = image_processor
_lowercase : Any = feature_extractor
def __call__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=False ,UpperCAmelCase_=False ,*UpperCAmelCase_ ,**UpperCAmelCase_ ,):
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
_lowercase : int = None
if images is not None:
_lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,mask_pixel=UpperCAmelCase_ ,*UpperCAmelCase_ ,**UpperCAmelCase_ )
if images_mixed is not None:
_lowercase : Optional[int] = self.image_processor(UpperCAmelCase_ ,is_mixed=UpperCAmelCase_ ,*UpperCAmelCase_ ,**UpperCAmelCase_ )
if audio is not None:
_lowercase : Tuple = self.feature_extractor(
UpperCAmelCase_ ,*UpperCAmelCase_ ,sampling_rate=UpperCAmelCase_ ,mask_audio=UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Dict = {}
if audio is not None:
output_dict.update(UpperCAmelCase_ )
if images is not None:
output_dict.update(UpperCAmelCase_ )
if images_mixed_dict is not None:
output_dict.update(UpperCAmelCase_ )
return output_dict
@property
def lowerCamelCase__ ( self ):
_lowercase : List[str] = self.image_processor.model_input_names
_lowercase : int = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
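# Example (illustrative sketch): the processor simply merges the two sub-processors'
# outputs into one batch dict; `video_frames` and `waveform` below are placeholders
# for real inputs.
#     processor = TvltProcessor(image_processor, feature_extractor)
#     batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)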
| 600
| 0
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
A_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()
        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")
        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
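# Example (illustrative sketch): the usual entry point is the high-level factory. The
# checkpoint name is an assumption; any CLAP-style audio/text dual-encoder should work.
#     from transformers import pipeline
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])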
| 456
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        spaces_between_special_tokens=True,
        **kwargs,
    ):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
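# Example (illustrative sketch): encoding a sentence pair goes through
# `build_inputs_with_special_tokens` above, producing [CLS] A [SEP] B [SEP] with
# token type ids 0 for the first segment (and its [SEP]) and 1 for the second.
#     tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#     enc = tok("first sentence", "second sentence")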
| 456
| 1
|
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f'{solution() = }')
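# Sanity check (added sketch): the loop above enumerates exactly the cube differences
# (n + 1)**3 - n**3 = 3*n*n + 3*n + 1, since successive differences grow by 6*(n + 1),
# which matches `prime_candidate += 6 * cube_index` after the index increment.
assert [(n + 1) ** 3 - n**3 for n in range(1, 5)] == [7, 19, 37, 61]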
| 715
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.'
            )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())
    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)
    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['sections']
    new_scheduler_doc = clean_doc_toc(scheduler_doc)
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.'
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.'
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
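# Example invocation (sketch; the exact script path is an assumption):
#     python utils/check_doc_toc.py                      # fail loudly if the ToC is unsorted
#     python utils/check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place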
| 148
| 0
|
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith('_')}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def _shared_call_kwargs(self, prompt, height, width, num_inference_steps, guidance_scale, negative_prompt,
                            num_images_per_prompt, eta, generator, latents, output_type, return_dict,
                            callback, callback_steps, **kwargs):
        # helper collecting the common keyword arguments once, instead of repeating them four times
        return dict(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback,
            callback_steps=callback_steps, **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                       negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                       output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(**self._shared_call_kwargs(prompt, height, width, num_inference_steps, guidance_scale,
                          negative_prompt, num_images_per_prompt, eta, generator, latents, output_type,
                          return_dict, callback, callback_steps, **kwargs))

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                       negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                       output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(**self._shared_call_kwargs(prompt, height, width, num_inference_steps, guidance_scale,
                          negative_prompt, num_images_per_prompt, eta, generator, latents, output_type,
                          return_dict, callback, callback_steps, **kwargs))

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                       negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                       output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(**self._shared_call_kwargs(prompt, height, width, num_inference_steps, guidance_scale,
                          negative_prompt, num_images_per_prompt, eta, generator, latents, output_type,
                          return_dict, callback, callback_steps, **kwargs))

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                       negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                       output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(**self._shared_call_kwargs(prompt, height, width, num_inference_steps, guidance_scale,
                          negative_prompt, num_images_per_prompt, eta, generator, latents, output_type,
                          return_dict, callback, callback_steps, **kwargs))

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                 negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                 output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.to(device)
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
        common_kwargs = self._shared_call_kwargs(prompt, height, width, num_inference_steps, guidance_scale,
                                                 negative_prompt, num_images_per_prompt, eta, generator, latents,
                                                 output_type, return_dict, callback, callback_steps, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(**common_kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(**common_kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(**common_kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(**common_kwargs)
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
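# Example (illustrative sketch): loading this as a diffusers community pipeline; the
# `custom_pipeline` name below is an assumption.
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     images = pipe(prompt="an astronaut riding a horse").images  # one image per v1.x checkpoint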
| 146
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
_DESCRIPTION = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
_KWARGS_DESCRIPTION = r'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
    def _compute(self, predictions, references):
        """simple docstring"""
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(ref, pred) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 146
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 391
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str):
    config = LxmertConfig.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
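# Example invocation (sketch; the script filename and paths are placeholders):
#     python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./lxmert_ckpt \
#         --config_file ./lxmert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin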
| 391
| 1
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 590
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            'task_specific_params': {
                'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
                'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
                'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
            }
        }
        expected_dict = {
            'task_specific_params.summarization.length_penalty': 1.0,
            'task_specific_params.summarization.max_length': 128,
            'task_specific_params.summarization.min_length': 12,
            'task_specific_params.summarization.num_beams': 4,
            'task_specific_params.summarization_cnn.length_penalty': 2.0,
            'task_specific_params.summarization_cnn.max_length': 142,
            'task_specific_params.summarization_cnn.min_length': 56,
            'task_specific_params.summarization_cnn.num_beams': 4,
            'task_specific_params.summarization_xsum.length_penalty': 1.0,
            'task_specific_params.summarization_xsum.max_length': 62,
            'task_specific_params.summarization_xsum.min_length': 11,
            'task_specific_params.summarization_xsum.num_beams': 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 576
| 0
|
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
| 705
|
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
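# Note: LayoutLM expects bounding boxes on a 0-1000 normalized grid, which is
# why the special tokens in the fixture above carry [0, 0, 0, 0] and
# [1000, 1000, 1000, 1000] boxes.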
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)

        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 165
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
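# Quick sanity sketch (illustrative, not part of the original module): with a
# 3x3 kernel and stride 2 on a 224x224 input, TF "SAME" padding adds a single
# row/column, split as (left=0, right=1, top=0, bottom=1):
#
#   conv = nn.Conv2d(3, 32, kernel_size=3, stride=2)
#   x = torch.randn(1, 3, 224, 224)
#   apply_tf_padding(x, conv).shape  # -> torch.Size([1, 3, 225, 225])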
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        # Each of the 13 blocks below is a depthwise separable convolution: a 3x3
        # depthwise conv (groups == in_channels) followed by a 1x1 pointwise conv.
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
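# Smoke-test sketch (illustrative): a randomly initialized head yields one
# logit row per image.
#
#   config = MobileNetVaConfig()
#   model = MobileNetVaForImageClassification(config)
#   logits = model(pixel_values=torch.randn(1, 3, 224, 224)).logits
#   logits.shape  # -> torch.Size([1, config.num_labels])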
| 343
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
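# Usage sketch (illustrative): instantiating with the defaults above mirrors
# the glpn-kitti encoder dimensions.
#
#   config = GLPNConfig()
#   config.hidden_sizes  # -> [32, 64, 160, 256]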
| 343
| 1
|
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Expects two lists of numbers of equal length and returns the Manhattan distance."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('''Both points must be in the same n-dimensional space''')

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        '''Expected a list of numbers as input, found '''
                        f'''{type(item).__name__}'''
                    )
                    raise TypeError(msg)
        else:
            msg = f'''Expected a list of numbers as input, found {type(point).__name__}'''
            raise TypeError(msg)
    else:
        raise ValueError('''Missing an input''')


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same as manhattan_distance, written as a single expression."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('''Both points must be in the same n-dimensional space''')

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
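# Quick usage sketch (values are illustrative):
#
#   manhattan_distance([1, 1], [2, 2])            # -> 2.0
#   manhattan_distance_one_liner([1, 1], [2, 2])  # -> 2.0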
if __name__ == "__main__":
import doctest
doctest.testmod()
| 190
|
"""simple docstring"""
class MaxFenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and range-maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        # Store the raw value, then push the change to every tree node covering `index`.
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # Reconstructed from a garbled line: refresh this node's cached
                # maximum with the new value (assumes non-decreasing updates).
                self.tree[index] = max(self.tree[index], value)
            index = self.get_next(index)

    def get_max(self, left: int, right: int) -> int:
        right -= 1  # Because `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
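# Usage sketch (illustrative): point updates followed by a range-maximum query
# over the half-open interval [left, right).
#
#   tree = MaxFenwickTree(5)
#   tree.update(0, 3)
#   tree.update(2, 7)
#   tree.get_max(0, 3)  # -> 7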
if __name__ == "__main__":
import doctest
doctest.testmod()
| 190
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class lowerCamelCase__ ( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def SCREAMING_SNAKE_CASE__ ( self : Any ):
_lowerCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase = self.get_image_processor(do_normalize=lowercase__ , padding_value=1.0 )
_lowerCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = SamProcessor(image_processor=lowercase__ )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = image_processor(lowercase__ , return_tensors='np' )
_lowerCAmelCase = processor(images=lowercase__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = SamProcessor(image_processor=lowercase__ )
_lowerCAmelCase = [torch.ones((1, 3, 5, 5) )]
_lowerCAmelCase = [[17_64, 26_46]]
_lowerCAmelCase = [[6_83, 10_24]]
_lowerCAmelCase = processor.post_process_masks(lowercase__ , lowercase__ , lowercase__ )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
_lowerCAmelCase = processor.post_process_masks(
lowercase__ , torch.tensor(lowercase__ ) , torch.tensor(lowercase__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
_lowerCAmelCase = [np.ones((1, 3, 5, 5) )]
_lowerCAmelCase = processor.post_process_masks(lowercase__ , np.array(lowercase__ ) , np.array(lowercase__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
_lowerCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(lowercase__ ):
_lowerCAmelCase = processor.post_process_masks(lowercase__ , np.array(lowercase__ ) , np.array(lowercase__ ) )
@require_vision
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
_lowerCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase = self.get_image_processor(do_normalize=lowercase__ , padding_value=1.0 )
_lowerCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = SamProcessor(image_processor=lowercase__ )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = image_processor(lowercase__ , return_tensors='np' )
_lowerCAmelCase = processor(images=lowercase__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : Any ):
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = SamProcessor(image_processor=lowercase__ )
_lowerCAmelCase = [tf.ones((1, 3, 5, 5) )]
_lowerCAmelCase = [[17_64, 26_46]]
_lowerCAmelCase = [[6_83, 10_24]]
_lowerCAmelCase = processor.post_process_masks(lowercase__ , lowercase__ , lowercase__ , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
_lowerCAmelCase = processor.post_process_masks(
lowercase__ , tf.convert_to_tensor(lowercase__ ) , tf.convert_to_tensor(lowercase__ ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
_lowerCAmelCase = [np.ones((1, 3, 5, 5) )]
_lowerCAmelCase = processor.post_process_masks(
lowercase__ , np.array(lowercase__ ) , np.array(lowercase__ ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
_lowerCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
_lowerCAmelCase = processor.post_process_masks(
lowercase__ , np.array(lowercase__ ) , np.array(lowercase__ ) , return_tensors='tf' )
@require_vision
@require_torchvision
class lowerCamelCase__ ( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
@is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = SamProcessor(image_processor=lowercase__ )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = image_processor(lowercase__ , return_tensors='pt' )['pixel_values'].numpy()
_lowerCAmelCase = processor(images=lowercase__ , return_tensors='pt' )['pixel_values'].numpy()
_lowerCAmelCase = image_processor(lowercase__ , return_tensors='tf' )['pixel_values'].numpy()
_lowerCAmelCase = processor(images=lowercase__ , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(lowercase__ , lowercase__ ) )
self.assertTrue(np.allclose(lowercase__ , lowercase__ ) )
self.assertTrue(np.allclose(lowercase__ , lowercase__ ) )
| 192
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
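# With this lazy-import pattern, `import transformers.models.swin` stays cheap:
# the heavy torch/TF submodules are only imported the first time one of the
# names registered in `_import_structure` is actually accessed.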
| 192
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.'''
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
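# Usage sketch (illustrative): with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2), the property above evaluates to 5 * 2**6 = 320, i.e.
# 320 raw audio samples per output frame.
#
#   UniSpeechConfig().inputs_to_logits_ratio  # -> 320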
| 713
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_0_0_0_0_0_0, n_limit: int = 1_0) -> int:
    """
    Count the tile totals t <= t_limit that can form between 1 and n_limit
    distinct hollow square laminae.
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
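# Worked example: a 7x7 outer square with a 3x3 hole uses 49 - 9 = 40 tiles,
# so that lamina increments count[40] by one.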
if __name__ == "__main__":
print(f'''{solution() = }''')
| 118
| 0
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViltImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
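# Minimal usage sketch (checkpoint id and inputs are illustrative):
#
#   from PIL import Image
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   encoding = processor(images=Image.open("photo.png"), text="How many cats?", return_tensors="pt")
#   sorted(encoding.keys())  # includes input_ids, pixel_values, pixel_mask, ...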
| 20
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False) -> list:
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", "beit.embeddings.cls_token"),
(F"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
(F"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
(F"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
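# For example, with is_semantic=False the first pair produced above is
# ("blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight").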
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias")

        state_dict[F"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[F"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(F"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(F"{prefix}blocks.{i}.gamma_2")

        state_dict[F"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[F"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowercase__ ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : List[str]=False ) -> int:
lowerCAmelCase__ : Optional[int] = False if "rvlcdip" in checkpoint_url else True
lowerCAmelCase__ : Any = BeitConfig(use_absolute_position_embeddings=lowerCamelCase , use_mask_token=lowerCamelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowerCAmelCase__ : Optional[Any] = 1_0_2_4
lowerCAmelCase__ : Any = 4_0_9_6
lowerCAmelCase__ : int = 2_4
lowerCAmelCase__ : Tuple = 1_6
# labels
if "rvlcdip" in checkpoint_url:
lowerCAmelCase__ : Optional[Any] = 1_6
lowerCAmelCase__ : str = "huggingface/label-files"
lowerCAmelCase__ : List[str] = "rvlcdip-id2label.json"
lowerCAmelCase__ : Tuple = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="dataset" ) , "r" ) )
lowerCAmelCase__ : Tuple = {int(lowerCamelCase ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Optional[Any] = idalabel
lowerCAmelCase__ : Any = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ : int = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="cpu" )["model"]
lowerCAmelCase__ : Union[str, Any] = create_rename_keys(lowerCamelCase , has_lm_head=lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
read_in_q_k_v(lowerCamelCase , lowerCamelCase , has_lm_head=lowerCamelCase )
# load HuggingFace model
lowerCAmelCase__ : Union[str, Any] = BeitForMaskedImageModeling(lowerCamelCase ) if has_lm_head else BeitForImageClassification(lowerCamelCase )
model.eval()
model.load_state_dict(lowerCamelCase )
# Check outputs on an image
lowerCAmelCase__ : Dict = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase )
lowerCAmelCase__ : str = prepare_img()
lowerCAmelCase__ : List[str] = image_processor(images=lowerCamelCase , return_tensors="pt" )
lowerCAmelCase__ : Any = encoding["pixel_values"]
lowerCAmelCase__ : Optional[Any] = model(lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = outputs.logits
# verify logits
lowerCAmelCase__ : str = [1, 1_6] if "rvlcdip" in checkpoint_url else [1, 1_9_6, 8_1_9_2]
assert logits.shape == torch.Size(lowerCamelCase ), "Shape of logits not as expected"
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
if has_lm_head:
lowerCAmelCase__ : List[Any] = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
lowerCAmelCase__ : Tuple = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=lowerCamelCase , )
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 308
| 0
|
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["""small""", """medium""", """large"""]

OLD_KEY = """lm_head.decoder.weight"""
NEW_KEY = """lm_head.weight"""


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
        pytorch_dump_folder_path = F'''./DialoGPT-{MODEL}'''
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
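# Example invocation (paths are illustrative):
#
#   python convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py --dialogpt_path ./checkpoints
#
# This rewrites each {small,medium,large}_ft.pkl into
# ./DialoGPT-<size>/pytorch_model.bin (WEIGHTS_NAME).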
| 704
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCAmelCase__ = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
A__ = MaskFormerModelTester(self)
A__ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase__ , **UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Any:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase__)
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''')
def SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''')
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer is not a generative model''')
def SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''')
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCAmelCase__)
A__ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
A__ = MaskFormerModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
A__ = (self.model_tester.min_size,) * 2
A__ = {
'''pixel_values''': torch.randn((2, 3, *size) , device=UpperCAmelCase__),
'''mask_labels''': torch.randn((2, 10, *size) , device=UpperCAmelCase__),
'''class_labels''': torch.zeros(2 , 10 , device=UpperCAmelCase__).long(),
}
A__ = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(UpperCAmelCase__)
A__ = model(**UpperCAmelCase__)
self.assertTrue(outputs.loss is not None)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase__ , **UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCAmelCase__).to(UpperCAmelCase__)
A__ = model(**UpperCAmelCase__ , output_attentions=UpperCAmelCase__)
self.assertTrue(outputs.attentions is not None)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
A__ = self.all_model_classes[1]
A__ , A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
A__ = model_class(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.train()
A__ = model(UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : int) ->str:
'''simple docstring'''
A__ = self.all_model_classes[1]
A__ , A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
A__ = True
A__ = True
A__ = model_class(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.train()
A__ = model(UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__)
A__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
A__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCAmelCase__)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
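# NOTE (editor's sketch, not part of the original test file): the assertions above hinge on
# Tensor.retain_grad(), which keeps gradients on non-leaf tensors so the test can verify that
# the loss actually back-propagates through every intermediate activation. A minimal
# standalone illustration of the pattern:
import torch
def _demo_retain_grad() -> None:
    x = torch.randn(3, requires_grad=True)
    hidden = x * 2        # non-leaf tensor: its grad is discarded by default
    hidden.retain_grad()  # opt in to keeping the gradient after backward()
    hidden.sum().backward()
    assert hidden.grad is not None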
_lowerCamelCase : Tuple = 1E-4
def SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''')
if is_vision_available()
else None
)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
A__ = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''').to(UpperCAmelCase__)
A__ = self.default_image_processor
A__ = prepare_img()
        inputs = image_processor(UpperCAmelCase__ , return_tensors='''pt''').to(UpperCAmelCase__)
        inputs_shape = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape , (1, 3, 800, 1_088))
with torch.no_grad():
A__ = model(**UpperCAmelCase__)
A__ = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]).to(UpperCAmelCase__)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__))
A__ = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]).to(UpperCAmelCase__)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__))
A__ = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]).to(UpperCAmelCase__)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__))
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
A__ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
.to(UpperCAmelCase__)
.eval()
)
A__ = self.default_image_processor
A__ = prepare_img()
        inputs = image_processor(UpperCAmelCase__ , return_tensors='''pt''').to(UpperCAmelCase__)
        inputs_shape = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape , (1, 3, 800, 1_088))
with torch.no_grad():
A__ = model(**UpperCAmelCase__)
# masks_queries_logits
A__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
A__ = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
A__ = torch.tensor(UpperCAmelCase__).to(UpperCAmelCase__)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__))
# class_queries_logits
A__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
A__ = torch.tensor(
[
[1.65_12e00, -5.25_72e00, -3.35_19e00],
[3.61_69e-02, -5.90_25e00, -2.93_13e00],
[1.07_66e-04, -7.76_30e00, -5.12_63e00],
]).to(UpperCAmelCase__)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__))
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
'''simple docstring'''
A__ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''')
.to(UpperCAmelCase__)
.eval()
)
A__ = self.default_image_processor
A__ = prepare_img()
        inputs = image_processor(UpperCAmelCase__ , return_tensors='''pt''').to(UpperCAmelCase__)
        inputs_shape = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape , (1, 3, 800, 1_088))
with torch.no_grad():
A__ = model(**UpperCAmelCase__)
# masks_queries_logits
A__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
A__ = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
A__ = torch.tensor(UpperCAmelCase__).to(UpperCAmelCase__)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__))
# class_queries_logits
A__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
A__ = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]).to(UpperCAmelCase__)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__))
def SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
'''simple docstring'''
A__ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
.to(UpperCAmelCase__)
.eval()
)
A__ = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1_333)), np.zeros((3, 800, 1_333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)] , return_tensors='''pt''' , )
A__ = inputs['''pixel_values'''].to(UpperCAmelCase__)
A__ = [el.to(UpperCAmelCase__) for el in inputs['''mask_labels''']]
A__ = [el.to(UpperCAmelCase__) for el in inputs['''class_labels''']]
with torch.no_grad():
A__ = model(**UpperCAmelCase__)
self.assertTrue(outputs.loss is not None)
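# NOTE (editor's sketch, not part of the original test file): the image processor call above
# turns dense segmentation maps into per-class binary masks plus a class-id list, which is
# the format MaskFormer's loss consumes. A minimal sketch of that conversion, assuming
# integer class ids in the map:
import numpy as np
def _seg_map_to_binary_masks(seg_map):
    class_ids = np.unique(seg_map)  # one binary mask per class present in the map
    masks = np.stack([(seg_map == cid).astype(np.float32) for cid in class_ids])
    return masks, class_ids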
| 177
| 0
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job( job ) -> dict:
    """Extract start/end/duration (in minutes) from a single GitHub Actions job record."""
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
def get_job_time( workflow_run_id , token=None ):
    """Extract time info for every job of a GitHub Actions workflow run, following pagination."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
| 332
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[Any]=13 , lowerCamelCase__ : Any=32 , lowerCamelCase__ : List[str]=3 , lowerCamelCase__ : str=4 , lowerCamelCase__ : Tuple=[10, 20, 30, 40] , lowerCamelCase__ : Any=[2, 2, 3, 2] , lowerCamelCase__ : Dict=True , lowerCamelCase__ : int=True , lowerCamelCase__ : int=37 , lowerCamelCase__ : Optional[int]="gelu" , lowerCamelCase__ : str=10 , lowerCamelCase__ : Dict=0.0_2 , lowerCamelCase__ : Optional[int]=["stage2", "stage3", "stage4"] , lowerCamelCase__ : Dict=[2, 3, 4] , lowerCamelCase__ : Any=None , ) -> int:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = num_stages
__lowercase = hidden_sizes
__lowercase = depths
__lowercase = is_training
__lowercase = use_labels
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = num_labels
__lowercase = initializer_range
__lowercase = out_features
__lowercase = out_indices
__lowercase = scope
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self : int ) -> Dict:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : Any ) -> Tuple:
"""simple docstring"""
__lowercase = ConvNextModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = ConvNextForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str , lowerCamelCase__ : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ConvNextBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__lowercase = None
__lowercase = ConvNextBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : List[Any] = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : int = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : Any = True
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : int = False
UpperCamelCase_ : Union[str, Any] = False
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ConvNextModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ) -> Any:
        """Referenced by the config test above; ConvNext has no extra common properties to check."""
        return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCamelCase__ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] ):
__lowercase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = ConvNextModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _A( ) -> Tuple:
'''simple docstring'''
__lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self : Any ) -> str:
"""simple docstring"""
__lowercase = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(lowerCamelCase__ )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCamelCase__ )
# verify the logits
__lowercase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
__lowercase = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
@require_torch
class a ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = (ConvNextBackbone,) if is_torch_available() else ()
UpperCamelCase_ : str = ConvNextConfig
UpperCamelCase_ : Optional[int] = False
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ConvNextModelTester(self )
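# NOTE (editor's sketch, not part of the original test file): the shape checks in the tester
# above follow from ConvNext's downsampling schedule: a stride-4 patchify stem followed by
# three stride-2 stage transitions, for a total stride of 32.
def _expected_final_resolution(image_size: int) -> int:
    total_stride = 4 * 2 * 2 * 2  # patchify stem, then three stage transitions
    return image_size // total_stride
# _expected_final_resolution(32) == 1, matching the backbone feature-map checks above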
| 332
| 1
|
from math import pow, sqrt
# Graham's law of effusion: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)
def validate(*values: float) -> bool:
    """Return True when at least one value is given and all values are positive."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result
def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )
def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
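# NOTE (editor's sketch, not part of the original module; molar masses are illustrative):
# by Graham's law, hydrogen (M ~ 2.016 g/mol) effuses roughly four times faster than
# oxygen (M ~ 31.998 g/mol).
def _demo_grahams_law() -> None:
    assert validate(2.016, 31.998)
    ratio = effusion_ratio(2.016, 31.998)  # sqrt(31.998 / 2.016) ~ 3.98
    assert isinstance(ratio, float) and 3.9 < ratio < 4.1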
| 706
|
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 153
| 0
|
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
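# NOTE (editor's sketch, not part of the original test file): generate_random_hand picks its
# expected outcome by indexing ["Loss", "Tie", "Win"] with (play >= oppo) + (play > oppo).
# The sum is 0 when play loses, 1 on a tie, and 2 when play wins, because SORTED_HANDS is
# ordered from weakest to strongest.
def _demo_outcome_index() -> None:
    outcome = ["Loss", "Tie", "Win"]
    assert outcome[(1 >= 2) + (1 > 2)] == "Loss"
    assert outcome[(2 >= 2) + (2 > 2)] == "Tie"
    assert outcome[(3 >= 2) + (3 > 2)] == "Win"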
| 92
|
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
UpperCAmelCase__ = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='relu'))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
UpperCAmelCase__ = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
UpperCAmelCase__ = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
UpperCAmelCase__ = train_datagen.flow_from_directory(
'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
UpperCAmelCase__ = test_datagen.flow_from_directory(
'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
UpperCAmelCase__ = tf.keras.preprocessing.image.load_img(
'dataset/single_prediction/image.png', target_size=(64, 64)
)
UpperCAmelCase__ = tf.keras.preprocessing.image.img_to_array(test_image)
UpperCAmelCase__ = np.expand_dims(test_image, axis=0)
UpperCAmelCase__ = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
UpperCAmelCase__ = 'Normal'
if result[0][0] == 1:
UpperCAmelCase__ = 'Abnormality detected'
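    # NOTE (editor's sketch, not part of the original script): a sigmoid head emits a
    # probability strictly between 0 and 1, so comparing the raw prediction to exactly 0
    # or 1 only behaves as intended if it is rounded first. A common variant thresholds
    # the probability at 0.5 instead:
    def _threshold_prediction(probability: float) -> str:
        return 'Abnormality detected' if probability > 0.5 else 'Normal'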
| 224
| 0
|
'''simple docstring'''
def actual_power(a: int, b: int):
    # Divide and conquer: a^b = a^(b//2) * a^(b//2), times a when b is odd.
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
    print(power(-2, -3))
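# NOTE (editor's sketch, not the original file's code): as written, actual_power recurses
# twice on the same half exponent, so the number of calls grows linearly in b. A variant
# that computes the half power once is logarithmic in b:
def fast_power(a: int, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    if b == 0:
        return 1
    half = fast_power(a, b // 2)  # computed once, reused twice
    return half * half * (a if b % 2 else 1)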
| 714
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 162
| 0
|
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    # Reverse every word longer than four characters,
    # e.g. "Hey wollef sroirraw" -> "Hey fellow warriors".
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(reverse_long_words('''Hey wollef sroirraw'''))
| 172
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
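# NOTE (editor's sketch, not part of the original module; the file path is hypothetical):
# older huggingface_hub releases passed the path through verbatim, so reserved characters
# such as spaces had to be percent-encoded by hand. quote() leaves "/" untouched, which is
# what a hub path requires.
def _demo_quote() -> None:
    assert quote("data/train file.json") == "data/train%20file.json"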
| 327
| 0
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model
def __UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.dummy_uncond_unet
_UpperCamelCase = ScoreSdeVeScheduler()
_UpperCamelCase = ScoreSdeVePipeline(unet=lowercase_ , scheduler=lowercase_)
sde_ve.to(lowercase_)
sde_ve.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=lowercase_).images
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=lowercase_ , return_dict=lowercase_)[
0
]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = "google/ncsnpp-church-256"
        _UpperCamelCase = UNet2DModel.from_pretrained(lowercase_)
_UpperCamelCase = ScoreSdeVeScheduler.from_pretrained(lowercase_)
_UpperCamelCase = ScoreSdeVePipeline(unet=lowercase_ , scheduler=lowercase_)
sde_ve.to(lowercase_)
sde_ve.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
'''simple docstring'''
__A = 0
__A = 1
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''generated'''
def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : Tuple) -> List[Any]:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : Union[str, Any]=None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = {}
if truncation is not None:
_UpperCamelCase = truncation
_UpperCamelCase = generate_kwargs
_UpperCamelCase = {}
if return_tensors is not None and return_type is None:
_UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
if len(lowercase_) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim.")
_UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> Any:
"""simple docstring"""
return True
def __UpperCAmelCase ( self : Dict , *lowercase_ : List[str] , lowercase_ : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , lowercase_):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
_UpperCamelCase = ([prefix + arg for arg in args[0]],)
_UpperCamelCase = True
elif isinstance(args[0] , lowercase_):
_UpperCamelCase = (prefix + args[0],)
_UpperCamelCase = False
else:
            raise ValueError(
                f' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`')
_UpperCamelCase = self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwarg
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : List[Any] , *lowercase_ : Any , **lowercase_ : int) -> Dict:
"""simple docstring"""
_UpperCamelCase = super().__call__(*lowercase_ , **lowercase_)
if (
isinstance(args[0] , lowercase_)
and all(isinstance(lowercase_ , lowercase_) for el in args[0])
and all(len(lowercase_) == 1 for res in result)
):
return [res[0] for res in result]
return result
def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_)
return inputs
def __UpperCAmelCase ( self : str , lowercase_ : str , **lowercase_ : str) -> str:
"""simple docstring"""
if self.framework == "pt":
_UpperCamelCase , _UpperCamelCase = model_inputs["input_ids"].shape
elif self.framework == "tf":
_UpperCamelCase , _UpperCamelCase = tf.shape(model_inputs["input_ids"]).numpy()
_UpperCamelCase = generate_kwargs.get("min_length" , self.model.config.min_length)
_UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
self.check_inputs(lowercase_ , generate_kwargs["min_length"] , generate_kwargs["max_length"])
_UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
_UpperCamelCase = output_ids.shape[0]
if self.framework == "pt":
_UpperCamelCase = output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:])
elif self.framework == "tf":
_UpperCamelCase = tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]))
return {"output_ids": output_ids}
def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int=ReturnType.TEXT , lowercase_ : int=False) -> Tuple:
"""simple docstring"""
_UpperCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCamelCase = {f'{self.return_name}_token_ids': output_ids}
elif return_type == ReturnType.TEXT:
_UpperCamelCase = {
f'{self.return_name}_text': self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
}
records.append(lowercase_)
return records
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''summary'''
def __call__( self : Optional[Any] , *lowercase_ : int , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
return super().__call__(*lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> bool:
"""simple docstring"""
if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be smaller than your max_length={max_length}.')
if input_length < max_length:
logger.warning(
f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''translation'''
def __UpperCAmelCase ( self : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> int:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
"increasing your max_length manually, e.g. translator('...', max_length=400)")
return True
def __UpperCAmelCase ( self : Tuple , *lowercase_ : Any , lowercase_ : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , lowercase_ : Any=None , lowercase_ : Optional[Any]=None) -> List[str]:
"""simple docstring"""
if getattr(self.tokenizer , "_build_translation_inputs" , lowercase_):
return self.tokenizer._build_translation_inputs(
*lowercase_ , return_tensors=self.framework , truncation=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_)
else:
return super()._parse_and_tokenize(*lowercase_ , truncation=lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : Dict=None , lowercase_ : str=None , **lowercase_ : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = super()._sanitize_parameters(**lowercase_)
if src_lang is not None:
_UpperCamelCase = src_lang
if tgt_lang is not None:
_UpperCamelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
_UpperCamelCase = kwargs.get("task" , self.task)
_UpperCamelCase = task.split("_")
if task and len(lowercase_) == 4:
# translation, XX, to YY
_UpperCamelCase = items[1]
_UpperCamelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *lowercase_ : List[str] , **lowercase_ : str) -> Union[str, Any]:
"""simple docstring"""
return super().__call__(*lowercase_ , **lowercase_)
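# NOTE (editor's sketch, not part of the original pipeline file): after generate(), the
# pipeline regroups the flat batch of generated sequences by input. With in_b inputs and
# n candidates per input, generate returns in_b * n rows, reshaped to (in_b, n, seq_len)
# so postprocessing can emit one list of records per input. A shape-only illustration:
import torch
def _demo_regroup(in_b: int = 2, n_candidates: int = 3, seq_len: int = 5):
    flat = torch.zeros(in_b * n_candidates, seq_len, dtype=torch.long)
    return flat.reshape(in_b, n_candidates, seq_len)  # one candidate axis per input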
| 82
| 1
|
def different_signs( num1: int, num2: int )-> bool:
    """simple docstring"""
    # In two's complement, num1 ^ num2 is negative exactly when the sign bits differ.
    return num1 ^ num2 < 0
if __name__ == "__main__":
    import doctest
    doctest.testmod()
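# NOTE (editor's sketch, not part of the original file): a few spot checks of the
# sign-bit argument above.
def _demo_different_signs() -> None:
    assert different_signs(1, -1)       # sign bits differ -> XOR is negative
    assert not different_signs(-3, -7)  # both negative -> XOR is non-negative
    assert not different_signs(2, 5)    # both positive -> XOR is non-negative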
| 604
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 604
| 1
|
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
    print(f'{solution() = }')
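# NOTE (editor's sketch, not part of the original file): why stepping the trial divisors
# by 6 is safe. Every integer is congruent to 0..5 mod 6, and residues 0, 2, 3, 4 are
# divisible by 2 or 3, so any prime above 3 must leave remainder 1 or 5.
def _demo_6k_form() -> None:
    assert all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 101))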
| 618
|
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class _a ( unittest.TestCase ):
"""simple docstring"""
A_ = inspect.getfile(accelerate.test_utils )
A_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] )
A_ = ["""accelerate""", """launch"""]
A_ = Path.home() / """.cache/huggingface/accelerate"""
A_ = """default_config.yaml"""
A_ = config_folder / config_file
A_ = config_folder / """_default_config.yaml"""
A_ = Path("""tests/test_configs""" )
@classmethod
def _UpperCAmelCase ( cls ) -> Union[str, Any]:
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def _UpperCAmelCase ( cls ) -> List[str]:
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def _UpperCAmelCase ( self ) -> Optional[int]:
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=_UpperCAmelCase ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(_UpperCAmelCase ), self.test_file_path] , env=os.environ.copy() )
def _UpperCAmelCase ( self ) -> Tuple:
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class _a ( unittest.TestCase ):
"""simple docstring"""
A_ = """test-tpu"""
A_ = """us-central1-a"""
A_ = """ls"""
A_ = ["""accelerate""", """tpu-config"""]
A_ = """cd /usr/share"""
A_ = """tests/test_samples/test_command_file.sh"""
A_ = """Running gcloud compute tpus tpu-vm ssh"""
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_UpperCAmelCase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
| 618
| 1
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys ( state_dict ) -> Tuple:
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("""module.encoder""" ):
            key = key.replace("""module.encoder""" , """glpn.encoder""" )
        if key.startswith("""module.decoder""" ):
            key = key.replace("""module.decoder""" , """decoder.stages""" )
        if "patch_embed" in key:
            # replace e.g. patch_embed1 with patch_embeddings.0
            idx = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
            key = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(idx )-1}''' )
        if "norm" in key:
            key = key.replace("""norm""" , """layer_norm""" )
        if "glpn.encoder.layer_norm" in key:
            # replace e.g. layer_norm1 with layer_norm.0
            idx = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
            key = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(idx )-1}''' )
        if "layer_norm1" in key:
            key = key.replace("""layer_norm1""" , """layer_norm_1""" )
        if "layer_norm2" in key:
            key = key.replace("""layer_norm2""" , """layer_norm_2""" )
        if "block" in key:
            # replace e.g. block1 with block.0
            idx = key[key.find("""block""" ) + len("""block""" )]
            key = key.replace(F'''block{idx}''' , F'''block.{int(idx )-1}''' )
        if "attn.q" in key:
            key = key.replace("""attn.q""" , """attention.self.query""" )
        if "attn.proj" in key:
            key = key.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in key:
            key = key.replace("""attn""" , """attention.self""" )
        if "fc1" in key:
            key = key.replace("""fc1""" , """dense1""" )
        if "fc2" in key:
            key = key.replace("""fc2""" , """dense2""" )
        if "linear_pred" in key:
            key = key.replace("""linear_pred""" , """classifier""" )
        if "linear_fuse" in key:
            key = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
            key = key.replace("""linear_fuse.bn""" , """batch_norm""" )
        if "linear_c" in key:
            # replace e.g. linear_c4 with linear_c.3
            idx = key[key.find("""linear_c""" ) + len("""linear_c""" )]
            key = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(idx )-1}''' )
        if "bot_conv" in key:
            key = key.replace("""bot_conv""" , """0.convolution""" )
        if "skip_conv1" in key:
            key = key.replace("""skip_conv1""" , """1.convolution""" )
        if "skip_conv2" in key:
            key = key.replace("""skip_conv2""" , """2.convolution""" )
        if "fusion1" in key:
            key = key.replace("""fusion1""" , """1.fusion""" )
        if "fusion2" in key:
            key = key.replace("""fusion2""" , """2.fusion""" )
        if "fusion3" in key:
            key = key.replace("""fusion3""" , """3.fusion""" )
        if "fusion" in key and "conv" in key:
            key = key.replace("""conv""" , """convolutional_layer""" )
        if key.startswith("""module.last_layer_depth""" ):
            key = key.replace("""module.last_layer_depth""" , """head.head""" )
        new_state_dict[key] = value
    return new_state_dict
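# Illustration of the renaming above on a few representative keys (prefixes
# only; taken from the branches of rename_keys):
#   module.encoder.patch_embed1.*    -> glpn.encoder.patch_embeddings.0.*
#   module.decoder.*                 -> decoder.stages.*
#   module.last_layer_depth.*        -> head.head.*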
def read_in_k_v ( state_dict , config ) -> Any:
    """simple docstring"""
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[config.hidden_sizes[i] :]
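# A minimal sketch (assumed shapes, not part of the conversion itself) of the
# split performed in the loop above: the original checkpoint stores key and
# value as one fused matrix of shape (2 * hidden, hidden); the first `hidden`
# rows become key.weight and the remaining rows become value.weight.
import torch as _torch_sketch

_hidden = 4
_kv_weight = _torch_sketch.randn(2 * _hidden, _hidden)
_key_weight = _kv_weight[:_hidden, :]    # rows 0 .. hidden-1   -> key.weight
_value_weight = _kv_weight[_hidden:, :]  # rows hidden .. 2h-1  -> value.weight
assert _key_weight.shape == _value_weight.shape == (_hidden, _hidden)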
def prepare_img ( ) -> Union[str, Any]:
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
return image
@torch.no_grad()
def convert_glpn_checkpoint ( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ) -> List[Any]:
    """simple docstring"""
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values
    logger.info("""Converting model...""" )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("""cpu""" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
        else:
            raise ValueError(F'''Unknown model name: {model_name}''' )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1E-4 )
        print("""Looks ok!""" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
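# Example invocation (the script filename and paths below are placeholders,
# assumed for illustration):
#
#     python convert_glpn_to_pytorch.py \
#         --checkpoint_path ./glpn_nyu.pth \
#         --pytorch_dump_folder_path ./glpn-nyu \
#         --model_name glpn-nyu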
| 26
|
def SCREAMING_SNAKE_CASE ( density , bulk_modulus ) -> float:
if density <= 0:
raise ValueError("Impossible fluid density")
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus")
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
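# Worked example (approximate textbook values, not from this file): for water,
# bulk modulus K ~= 2.15e9 Pa and density rho = 1000 kg/m^3 give
#
#     SCREAMING_SNAKE_CASE(1000, 2.15e9)  # sqrt(2.15e9 / 1000) ~= 1466.29 m/s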
| 515
| 0
|
'''simple docstring'''
from __future__ import annotations
def a_ ( nth_term : int | float | str , power : int | float | str ) -> list[str]:
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series : list[str] = []
    for temp in range(nth_term ):
        series.append(f'''1 / {pow(temp + 1 , power )}''' if series else '1' )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input('''Enter the last number (nth term) of the P-Series'''))
    power = int(input('''Enter the power for P-Series'''))
    print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
    print(a_(nth_term, power))
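# Example: a_(5, 2) builds the p = 2 series
# ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'].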
| 706
|
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class XLMProphetNetTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ) -> List[str]:
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ) -> int:
        '''simple docstring'''
        token = '[PAD]'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) -> Any:
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '[PAD]' )
        self.assertEqual(vocab_keys[1] , '[CLS]' )
        self.assertEqual(vocab_keys[-1] , 'j' )
        self.assertEqual(len(vocab_keys ) , 1012 )
    def test_vocab_size( self ) -> Tuple:
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
    def test_full_tokenizer( self ) -> Optional[Any]:
        '''simple docstring'''
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
@cached_property
    def big_tokenizer( self ) -> Optional[Any]:
        '''simple docstring'''
        return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
    def test_tokenization_base_easy_symbols( self ) -> List[Any]:
        '''simple docstring'''
        symbols = 'Hello World!'
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration( self ) -> Union[str, Any]:
'''simple docstring'''
# fmt: off
__snake_case : Union[str, Any] = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
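# Note on the `fairseq_offset` used in the assertions above: the tokenizer
# shifts raw SentencePiece ids by a constant so that the lowest ids are
# reserved for fairseq-style special tokens ([PAD], [CLS], ...); the raw id
# lists in the tests are therefore offset before being compared.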
| 124
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
lowercase_ = "▁"
class BarthezTokenizer (PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] , a_ : List[str] , a_ : int="<s>" , a_ : int="</s>" , a_ : int="</s>" , a_ : List[Any]="<s>" , a_ : Optional[Any]="<unk>" , a_ : Optional[int]="<pad>" , a_ : str="<mask>" , a_ : Optional[Dict[str, Any]] = None , **a_ : List[Any] , )-> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
UpperCAmelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , )
UpperCAmelCase_ : Optional[int] = vocab_file
UpperCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_ : Dict = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
UpperCAmelCase_ : Optional[int] = len(self.sp_model ) - 1
UpperCAmelCase_ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def a ( self : Dict , a_ : List[int] , a_ : Optional[List[int]] = None )-> Optional[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : List[Any] = [self.cls_token_id]
UpperCAmelCase_ : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a ( self : Optional[int] , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False )-> int:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def a ( self : Any , a_ : List[int] , a_ : Optional[List[int]] = None )-> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a ( self : Optional[int] )-> Any:
"""simple docstring"""
return len(self.sp_model )
def a ( self : Dict )-> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a ( self : Union[str, Any] , a_ : str )-> Tuple:
"""simple docstring"""
return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ )
def a ( self : List[Any] , a_ : List[Any] )-> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase_ : Optional[int] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
return spm_id if spm_id else self.unk_token_id
def a ( self : Dict , a_ : Tuple )-> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_ )
def a ( self : Tuple , a_ : Tuple )-> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Any = """"""
UpperCAmelCase_ : Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) + token
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Dict = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : str = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ )
return out_string.strip()
def __getstate__( self : List[str] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.__dict__.copy()
UpperCAmelCase_ : Union[str, Any] = None
return state
def __setstate__( self : Optional[Any] , a_ : int )-> Tuple:
"""simple docstring"""
UpperCAmelCase_ : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase_ : Optional[int] = {}
UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a ( self : Optional[Any] , a_ : str , a_ : Optional[str] = None )-> Tuple:
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_ , """wb""" ) as fi:
UpperCAmelCase_ : str = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
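# Sketch of the special-token layout built by the methods above (the
# RoBERTa-style pattern): a single sequence is wrapped as
#     <s> A </s>
# and a pair of sequences as
#     <s> A </s></s> B </s>
# with token type ids of all zeros in both cases.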
| 470
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rf"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
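# Example invocation (as described in the header comment above):
#
#     python ./utils/get_modified_files.py utils src tests examples
#
# which prints a space-separated list of modified .py files, e.g.
# "src/foo.py tests/test_foo.py" (output shown is illustrative).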
| 562
| 0
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
BPE_TOKEN_MERGES = '''</w>'''
BPE_TOKEN_VOCAB = '''@@ '''
def get_pairs ( word ) -> Any:
    """Return the set of adjacent symbol pairs in a word (a sequence of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
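# Minimal illustration of get_pairs on the word "low":
#     get_pairs("low") -> {("l", "o"), ("o", "w")}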
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/s2t-wav2vec2-large-en-de''': 1_024}
class SpeechaTextaTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , a__ , a__="<s>" , a__="<pad>" , a__="</s>" , a__="<unk>" , a__=False , a__=None , **a__ , ):
super().__init__(
unk_token=a__ , bos_token=a__ , eos_token=a__ , pad_token=a__ , do_lower_case=a__ , **a__ , )
_UpperCAmelCase = do_lower_case
with open(a__ , encoding='utf-8' ) as vocab_handle:
_UpperCAmelCase = json.load(a__ )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_UpperCAmelCase = None
_UpperCAmelCase = None
else:
with open(a__ , encoding='utf-8' ) as merges_handle:
_UpperCAmelCase = merges_handle.read().split('\n' )[:-1]
_UpperCAmelCase = [tuple(merge.split()[:2] ) for merge in merges]
_UpperCAmelCase = dict(zip(a__ , range(len(a__ ) ) ) )
_UpperCAmelCase = {}
    @property
    def vocab_size( self ):
        return len(self.decoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES , '' )
        word = word.replace(' ' , BPE_TOKEN_VOCAB )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide a `merges.txt` file at instantiation to enable '
                'encoding.' )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(' ' ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        result = self.decoder.get(index , self.unk_token )
        return result
    def convert_tokens_to_string( self , tokens ):
        string = ' '.join(tokens )
        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_VOCAB ) )
        return string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merges_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , 'w' , encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return (vocab_file, merges_file)
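# Sketch of the decoding convention implemented in convert_tokens_to_string
# above (illustrative tokens, not from a real vocabulary): continuation
# pieces carry a trailing "@@" marker, so
#     " ".join(["hall@@", "o", "welt"])      -> "hall@@ o welt"
#     "".join("hall@@ o welt".split("@@ "))  -> "hallo welt"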
| 494
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''vinai/bartpho-syllable''': 1_024}
class BartphoTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , a__ , a__ , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__ = None , **a__ , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = monolingual_vocab_file
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a__ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_UpperCAmelCase = {}
_UpperCAmelCase = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(a__ ) not in self.fairseq_tokens_to_ids:
_UpperCAmelCase = cnt
cnt += 1
with open(a__ , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
_UpperCAmelCase = line.strip().split()[0]
_UpperCAmelCase = len(self.fairseq_tokens_to_ids )
if str(a__ ) not in self.fairseq_tokens_to_ids:
_UpperCAmelCase = len(self.fairseq_tokens_to_ids )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , a__ ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __A ( self , a__ , a__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
_UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self , a__ , a__ = None , a__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1, 1] + ([0] * len(a__ )) + [1]
def __A ( self , a__ , a__ = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __A ( self ):
return len(self.fairseq_ids_to_tokens )
def __A ( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self , a__ ):
return self.sp_model.encode(a__ , out_type=a__ )
def __A ( self , a__ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __A ( self , a__ ):
return self.fairseq_ids_to_tokens[index]
def __A ( self , a__ ):
_UpperCAmelCase = ''.join(a__ ).replace(a__ , ' ' ).strip()
return out_string
def __A ( self , a__ , a__ = None ):
if not os.path.isdir(a__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase = os.path.join(
a__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_UpperCAmelCase = os.path.join(
a__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , 'wb' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(a__ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
a__ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , a__ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(a__ , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"""{str(a__ )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
| 494
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name : Union[str, Any] ):
    '''simple docstring'''
    config = SwinvaConfig()
    name_split = swinva_name.split("_" )
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:] )
    else:
        img_size = int(name_split[3] )
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:] )
    else:
        window_size = int(name_split[2][6:] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
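# Illustration of the name parsing above (example name chosen so every branch
# shown is exercised; it is illustrative, not an exhaustive list): for
# "swinv2_base_window12to16_192to256_22kto1k_ft", name_split[1] gives the
# model size ("base"), name_split[3] ("192to256") yields img_size 256, and
# name_split[2] ("window12to16") yields window_size 16.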
def rename_key(name ):
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.norm" )
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "q_bias" in name:
        name = name.replace("q_bias" , "query.bias" )
    if "k_bias" in name:
        name = name.replace("k_bias" , "key.bias" )
    if "v_bias" in name:
        name = name.replace("v_bias" , "value.bias" )
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head" , "classifier" )
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict , model ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f"""swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"""] = val[:dim, :]
                orig_state_dict[f"""swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"""] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"""swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[f"""swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"""] = val[:dim]
                orig_state_dict[f"""swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[f"""swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
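# A minimal sketch (assumed shapes) of the qkv split above: timm fuses the
# query/key/value projections into one matrix of shape (3 * dim, dim), and the
# slices [:dim], [dim : 2 * dim] and [-dim:] recover the three projections.
import torch as _torch_sketch

_dim = 2
_qkv = _torch_sketch.arange(3 * _dim * _dim).reshape(3 * _dim, _dim)
_q, _k, _v = _qkv[:_dim, :], _qkv[_dim : _dim * 2, :], _qkv[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)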
def convert_swinva_checkpoint(swinva_name , pytorch_dump_folder_path ):
    '''simple docstring'''
    timm_model = timm.create_model(swinva_name , pretrained=True )
    timm_model.eval()
    config = get_swinva_config(swinva_name )
    model = SwinvaForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors="pt" )
    timm_outs = timm_model(inputs["pixel_values"] )
    hf_outs = model(**inputs ).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1e-3 )
    print(F'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path , swinva_name ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swinv2_name",
default="swinv2_tiny_patch4_window8_256",
type=str,
help="Name of the Swinv2 timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 18
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
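# All classes below follow one pattern: importable placeholder ("dummy")
# objects that defer failure until they are actually used, so the library can
# be imported without torch installed. A minimal self-contained sketch of the
# idea (illustrative only, not the library's implementation):
#
#     class DummyModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])  # raises if torch is absent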
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
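# A minimal sketch (editor's addition, not part of the original module) of the
# pattern the classes above follow. In transformers the real version looks
# roughly like this, with a meaningful model name in place of the placeholder:
#
#     from transformers.utils import DummyObject, requires_backends
#
#     class SomeTorchOnlyModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])
#
# Instantiating the dummy raises an ImportError telling the user to install
# PyTorch, instead of failing later with an opaque AttributeError.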
| 18
| 1
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of references for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage="https://github.com/krishnap25/mauve", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }), codebase_urls=["https://github.com/krishnap25/mauve"], reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ], )

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1_024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
        return out
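# Minimal usage sketch (editor's addition, not part of the original metric
# module): featurization downloads a GPT-2 model on first use, hence the guard.
if __name__ == "__main__":
    mauve_metric = datasets.load_metric("mauve")
    result = mauve_metric.compute(predictions=["hello there"], references=["hi there"])
    print(result.mauve)  # scalar in (0, 1]; larger means the two text distributions are closer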
| 84
|
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(self, config, input_dict, ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged")
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
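# Editor's note: with the usual transformers checkout layout (the path may
# differ), the fast tests above can be run in isolation with, e.g.:
#   python -m pytest tests/models/umt5/test_modeling_umt5.py -k "not integration"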
| 84
| 1
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed v_rms = sqrt(3 * R * T / M) in m/s,
    with the temperature in kelvin and the molar mass in kg/mol."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28  # NOTE: the formula expects kg/mol; nitrogen's 28 g/mol would be 0.028 here
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
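# Worked check (editor's addition): with the molar mass given in kg/mol, the
# textbook value for nitrogen at 300 K falls out of v_rms = sqrt(3 * R * T / M):
#   rms_speed_of_molecule(300, 0.028)  ->  ~516.96 m/s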
| 389
|
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
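# Sketch of consuming these guarded exports (editor's addition): callers can
# probe the same availability flags before importing torch-only classes.
#
#     from diffusers.utils import is_torch_available
#     if is_torch_available():
#         from diffusers.models import UNet2DModel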
| 389
| 1
|
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1_000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False, )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
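# Why the permutations above (editor's note): Keras stores 2D conv kernels as
# (kernel_h, kernel_w, in_channels, out_channels) while torch.nn.Conv2d expects
# (out_channels, in_channels, kernel_h, kernel_w); depthwise kernels are stored
# as (kernel_h, kernel_w, channels, depth_multiplier), hence the different
# permute order. Quick shape check:
#     import numpy, torch
#     k = numpy.zeros((3, 3, 16, 32))  # HWIO from Keras
#     assert tuple(torch.from_numpy(k).permute(3, 2, 0, 1).shape) == (32, 16, 3, 3)  # OIHW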
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load original model
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1_000, classifier_activation="softmax", )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
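# Example invocation (editor's sketch; the script filename may differ):
#   python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model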
| 67
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
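# Minimal usage sketch (editor's addition): exercising the published fast
# tokenizer from the hub rather than this module directly.
#
#     from transformers import GPT2TokenizerFast
#     tok = GPT2TokenizerFast.from_pretrained("gpt2")
#     print(tok("hello world").input_ids)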
| 67
| 1
|
'''simple docstring'''
import math
def proth(number: int) -> int:
    """Return the number-th Proth number (3, 5, 9, 13, 17, 25, 33, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers come in doubling "blocks"; precompute enough of them.
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
| 116
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, sample_rate=16_000, in_channels=2, out_channels=2, flip_sin_to_cos=True, use_timestep_embedding=False, time_embedding_type="fourier", mid_block_type="UNetMidBlock1D", down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), )
        scheduler = IPNDMScheduler()
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    def test_dance_diffusion_fp16(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
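# Minimal end-to-end sketch (editor's addition): the slow tests above reduce to
# roughly this; it needs the harmonai/maestro-150k weights and, realistically, a GPU.
#
#     from diffusers import DanceDiffusionPipeline
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     audio = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios[0]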
| 116
| 1
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample
        return sample
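    # Editor's note: PNDM runs in two phases, which full_loop above mirrors: a
    # Runge-Kutta warm-up over `prk_timesteps` that bootstraps the multistep
    # history (`scheduler.ets`), then cheaper pseudo linear multistep (PLMS)
    # updates over `plms_timesteps` that reuse the last four residuals.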
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
def A_ (self ) -> Tuple:
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def A_ (self ) -> Dict:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase_ )
UpperCamelCase_ : Tuple = self.scheduler_classes[0]
UpperCamelCase_ : Any = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase_ : Optional[int] = scheduler_class(**lowerCAmelCase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def A_ (self ) -> List[str]:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def A_ (self ) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def A_ (self ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def A_ (self ) -> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def A_ (self ) -> str:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCAmelCase_ )
def A_ (self ) -> Any:
UpperCamelCase_ : Optional[Any] = 27
for scheduler_class in self.scheduler_classes:
UpperCamelCase_ : int = self.dummy_sample
UpperCamelCase_ : Union[str, Any] = 0.1 * sample
UpperCamelCase_ : Optional[Any] = self.get_scheduler_config()
UpperCamelCase_ : str = scheduler_class(**lowerCAmelCase_ )
scheduler.set_timesteps(lowerCAmelCase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCamelCase_ : Optional[Any] = scheduler.step_prk(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
def A_ (self ) -> Any:
with self.assertRaises(lowerCAmelCase_ ):
UpperCamelCase_ : List[str] = self.scheduler_classes[0]
UpperCamelCase_ : List[Any] = self.get_scheduler_config()
UpperCamelCase_ : Tuple = scheduler_class(**lowerCAmelCase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def A_ (self ) -> List[Any]:
UpperCamelCase_ : Union[str, Any] = self.full_loop()
UpperCamelCase_ : List[Any] = torch.sum(torch.abs(lowerCAmelCase_ ) )
UpperCamelCase_ : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 198.1_318 ) < 1E-2
assert abs(result_mean.item() - 0.2_580 ) < 1E-3
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_ : Dict = self.full_loop(prediction_type="""v_prediction""" )
UpperCamelCase_ : Union[str, Any] = torch.sum(torch.abs(lowerCAmelCase_ ) )
UpperCamelCase_ : List[Any] = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 67.3_986 ) < 1E-2
assert abs(result_mean.item() - 0.0_878 ) < 1E-3
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_ : Optional[Any] = self.full_loop(set_alpha_to_one=lowerCAmelCase_ , beta_start=0.01 )
UpperCamelCase_ : Any = torch.sum(torch.abs(lowerCAmelCase_ ) )
UpperCamelCase_ : Any = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 230.0_399 ) < 1E-2
assert abs(result_mean.item() - 0.2_995 ) < 1E-3
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_ : Optional[int] = self.full_loop(set_alpha_to_one=lowerCAmelCase_ , beta_start=0.01 )
UpperCamelCase_ : str = torch.sum(torch.abs(lowerCAmelCase_ ) )
UpperCamelCase_ : List[str] = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 186.9_482 ) < 1E-2
assert abs(result_mean.item() - 0.2_434 ) < 1E-3
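As a companion to the scheduler round-trip and full-loop checks above, a minimal sketch of the sampling loop they exercise, assuming a recent diffusers release; the random tensor is a stand-in for a real UNet's noise prediction:
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # placeholder for unet(sample, t)
    # step() runs the Runge-Kutta (PRK) warm-up steps first, then PLMS steps
    sample = scheduler.step(model_output, t, sample).prev_sample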
| 719
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase ( __a ):
def __init__(self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Optional[int]:
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def A_ (self , __UpperCamelCase = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase_ : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def A_ (self ) -> Union[str, Any]:
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__(self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ) -> List[Any]:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : Tuple = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : Optional[int] = len(__UpperCamelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__UpperCamelCase )}.''' )
# get prompt text embeddings
UpperCamelCase_ : Tuple = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCamelCase_ : Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase_ : Dict = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase_ : Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCamelCase_ : Tuple = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase_,UpperCamelCase_,UpperCamelCase_ : int = text_embeddings.shape
UpperCamelCase_ : Union[str, Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
UpperCamelCase_ : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase_ : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase_ : List[str]
if negative_prompt is None:
UpperCamelCase_ : Union[str, Any] = [""""""]
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !='''
f''' {type(__UpperCamelCase )}.''' )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : List[Any] = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
UpperCamelCase_ : Dict = negative_prompt
UpperCamelCase_ : Optional[Any] = text_input_ids.shape[-1]
UpperCamelCase_ : List[str] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
UpperCamelCase_ : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase_ : int = uncond_embeddings.shape[1]
UpperCamelCase_ : List[str] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 )
UpperCamelCase_ : List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase_ : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCamelCase_ : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase_ : int = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device )
UpperCamelCase_ : Tuple = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
UpperCamelCase_ : Optional[Any] = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCamelCase_ : Union[str, Any] = latents_reference.to(self.device )
UpperCamelCase_ : Any = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCamelCase_ : int = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCamelCase_ : Union[str, Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCamelCase_ : List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCamelCase_ : List[str] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCamelCase_ : str = 0 if dx < 0 else dx
UpperCamelCase_ : List[str] = 0 if dy < 0 else dy
UpperCamelCase_ : Dict = max(-dx , 0 )
UpperCamelCase_ : str = max(-dy , 0 )
UpperCamelCase_ : str = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays.
# It's more efficient to move all timesteps to the correct device beforehand.
UpperCamelCase_ : Any = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase_ : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase_ : str = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase_ : Any = {}
if accepts_eta:
UpperCamelCase_ : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase_ : str = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
UpperCamelCase_ : str = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase_,UpperCamelCase_ : Any = noise_pred.chunk(2 )
UpperCamelCase_ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase_ : int = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ : Optional[int] = 1 / 0.18_215 * latents
UpperCamelCase_ : List[str] = self.vae.decode(__UpperCamelCase ).sample
UpperCamelCase_ : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCamelCase_ : List[Any] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to(
self.device )
UpperCamelCase_,UpperCamelCase_ : Optional[Any] = self.safety_checker(
images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCamelCase_ : Tuple = None
if output_type == "pil":
UpperCamelCase_ : Any = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
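The classifier-free guidance update inside the denoising loop above is easy to check in isolation. A minimal sketch with dummy tensors (the shapes are illustrative, not tied to any checkpoint):
import torch

guidance_scale = 7.5
noise_pred = torch.randn(2, 4, 64, 64)  # stacked [unconditional, conditional] predictions

noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 64, 64)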
| 138
| 0
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowercase_ ( A , A ):
"""simple docstring"""
lowerCamelCase_ = 1
@register_to_config
def __init__( self : List[str] , __lowerCamelCase : Optional[int]=2_0_0_0 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Union[str, Any]=2_0 , __lowerCamelCase : Optional[int]=1e-3 ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
def lowerCAmelCase_ ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Union[str, torch.device] = None ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch.linspace(1 , self.config.sampling_eps , __lowerCamelCase , device=__lowerCamelCase )
def lowerCAmelCase_ ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Any=None ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
_SCREAMING_SNAKE_CASE = (
-0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
_SCREAMING_SNAKE_CASE = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
_SCREAMING_SNAKE_CASE = std.flatten()
while len(std.shape ) < len(score.shape ):
_SCREAMING_SNAKE_CASE = std.unsqueeze(-1 )
_SCREAMING_SNAKE_CASE = -score / std
# compute
_SCREAMING_SNAKE_CASE = -1.0 / len(self.timesteps )
_SCREAMING_SNAKE_CASE = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
_SCREAMING_SNAKE_CASE = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
_SCREAMING_SNAKE_CASE = beta_t.unsqueeze(-1 )
_SCREAMING_SNAKE_CASE = -0.5 * beta_t * x
_SCREAMING_SNAKE_CASE = torch.sqrt(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = drift - diffusion**2 * score
_SCREAMING_SNAKE_CASE = x + drift * dt
# add noise
_SCREAMING_SNAKE_CASE = randn_tensor(x.shape , layout=x.layout , generator=__lowerCamelCase , device=x.device , dtype=x.dtype )
_SCREAMING_SNAKE_CASE = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Optional[int] ):
"""simple docstring"""
return self.config.num_train_timesteps
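A self-contained sketch of the single Euler-Maruyama update that the step method above performs for the reverse VP-SDE; the constants mirror the scheduler defaults, and score is a stand-in for a trained score network:
import math
import torch

beta_min, beta_max, num_steps = 0.1, 20.0, 2000
x = torch.randn(4, 3, 32, 32)
t = 0.5
score = -x  # placeholder for model(x, t)

dt = -1.0 / num_steps
beta_t = beta_min + t * (beta_max - beta_min)
diffusion = math.sqrt(beta_t)
drift = -0.5 * beta_t * x - diffusion**2 * score
x_mean = x + drift * dt                                  # deterministic part
x = x_mean + diffusion * math.sqrt(-dt) * torch.randn_like(x)  # add noise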
| 418
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : int = 3_2 , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 2_5_5 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowerCamelCase : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowerCamelCase : bool = True , __lowerCamelCase : str=7 , __lowerCamelCase : Union[str, Any]=3_0 , __lowerCamelCase : Tuple=4_0_0 , __lowerCamelCase : List[Any]=3 , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 2_8_8}
_SCREAMING_SNAKE_CASE = size_divisor
_SCREAMING_SNAKE_CASE = do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = do_center_crop
_SCREAMING_SNAKE_CASE = image_mean
_SCREAMING_SNAKE_CASE = image_std
_SCREAMING_SNAKE_CASE = do_pad
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = min_resolution
_SCREAMING_SNAKE_CASE = max_resolution
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def lowerCAmelCase_ ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : int=False ):
"""simple docstring"""
if not batched:
_SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
_SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = image.size
else:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
_SCREAMING_SNAKE_CASE = size / min(__lowerCamelCase , __lowerCamelCase )
if h < w:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = size, scale * w
else:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = scale * h, size
_SCREAMING_SNAKE_CASE = int((1_3_3_3 / 8_0_0) * size )
if max(__lowerCamelCase , __lowerCamelCase ) > max_size:
_SCREAMING_SNAKE_CASE = max_size / max(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = newh * scale
_SCREAMING_SNAKE_CASE = neww * scale
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = int(newh + 0.5 ), int(neww + 0.5 )
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
_SCREAMING_SNAKE_CASE = []
for image in image_inputs:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_SCREAMING_SNAKE_CASE = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
_SCREAMING_SNAKE_CASE = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase_ ( A , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase_ = BridgeTowerImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BridgeTowerImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size_divisor" ) )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
# Initialize image processor
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
# Initialize image processor
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
# Initialize image processor
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
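The expected-size arithmetic in the tester above, extracted into a standalone helper for clarity (the defaults mirror the tester: shortest edge 288, size divisor 32):
def expected_hw(h, w, size=288, size_divisor=32):
    # resize so the shortest edge becomes `size`
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    # cap the longest edge at (1333 / 800) * size
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        scale = max_size / max(newh, neww)
        newh, neww = newh * scale, neww * scale
    # round, then snap down to a multiple of `size_divisor`
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

assert expected_hw(480, 640) == (288, 384)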
| 418
| 1
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def a(lowercase__ ):
'''simple docstring'''
snake_case_ = Decimal
# Check if the provided matrix has 2 rows and 2 columns;
# this branch handles the 2x2 case (a 3x3 branch follows below)
if len(_lowerCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
snake_case_ = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
snake_case_ = [[0.0, 0.0], [0.0, 0.0]]
snake_case_ = matrix[1][1], matrix[0][0]
snake_case_ = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_lowerCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_lowerCamelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
snake_case_ = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
snake_case_ = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
snake_case_ = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
snake_case_ = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
snake_case_ = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
snake_case_ = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
snake_case_ = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
snake_case_ = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
snake_case_ = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
snake_case_ = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
snake_case_ = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
snake_case_ = array(_lowerCamelCase )
for i in range(3 ):
for j in range(3 ):
snake_case_ = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
snake_case_ = array(_lowerCamelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_lowerCamelCase )
# Calculate the inverse of the matrix
return [[float(d(_lowerCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
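A quick sanity check for any 2x2 or 3x3 inverse routine such as the one above: multiplying a matrix by its inverse must yield the identity. Here against numpy's reference implementation:
import numpy as np

m = np.array([[4.0, 7.0], [2.0, 6.0]])
inv = np.linalg.inv(m)  # reference implementation
assert np.allclose(m @ inv, np.eye(2))
print(inv)  # [[ 0.6 -0.7]
            #  [-0.2  0.4]]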
| 716
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.dummy_uncond_unet
snake_case_ = ScoreSdeVeScheduler()
snake_case_ = ScoreSdeVePipeline(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
sde_ve.to(__UpperCamelCase )
sde_ve.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = torch.manual_seed(0 )
snake_case_ = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=__UpperCamelCase ).images
snake_case_ = torch.manual_seed(0 )
snake_case_ = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=__UpperCamelCase , return_dict=__UpperCamelCase )[
0
]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = 'google/ncsnpp-church-256'
snake_case_ = UNetaDModel.from_pretrained(__UpperCamelCase )
snake_case_ = ScoreSdeVeScheduler.from_pretrained(__UpperCamelCase )
snake_case_ = ScoreSdeVePipeline(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
sde_ve.to(__UpperCamelCase )
sde_ve.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = torch.manual_seed(0 )
snake_case_ = sde_ve(num_inference_steps=10 , output_type='numpy' , generator=__UpperCamelCase ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
snake_case_ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
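The reproducibility pattern the tests above depend on, in isolation: reseeding before each run makes two sampling passes bitwise comparable.
import torch

a = torch.randn(2, 3, generator=torch.manual_seed(0))
b = torch.randn(2, 3, generator=torch.manual_seed(0))
assert torch.equal(a, b)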
| 46
| 0
|
import warnings
warnings.warn(
'''memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
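A minimal sketch of the replacement import the warning points to, in its decorator form (assuming a recent accelerate release):
from accelerate import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # on CUDA out-of-memory, accelerate halves batch_size and retries
    print(f"trying batch_size={batch_size}")

train()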
| 283
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class A_ ( datasets.BuilderConfig ):
'''simple docstring'''
_lowerCAmelCase = None
class A_ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
_lowerCAmelCase = PandasConfig
def a ( self ):
return datasets.DatasetInfo(features=self.config.features )
def a ( self , A_ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
_UpperCamelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(A_ , (str, list, tuple) ):
_UpperCamelCase = data_files
if isinstance(A_ , A_ ):
_UpperCamelCase = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_UpperCamelCase = [dl_manager.iter_files(A_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
_UpperCamelCase = []
for split_name, files in data_files.items():
if isinstance(A_ , A_ ):
_UpperCamelCase = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_UpperCamelCase = [dl_manager.iter_files(A_ ) for file in files]
splits.append(datasets.SplitGenerator(name=A_ , gen_kwargs={"files": files} ) )
return splits
def a ( self , A_ ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_UpperCamelCase = table_cast(A_ , self.config.features.arrow_schema )
return pa_table
def a ( self , A_ ):
for i, file in enumerate(itertools.chain.from_iterable(A_ ) ):
with open(A_ , "rb" ) as f:
_UpperCamelCase = pa.Table.from_pandas(pd.read_pickle(A_ ) )
yield i, self._cast_table(A_ )
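The core conversion in the table generator above, in isolation: a pickled DataFrame is read back from an open binary file and turned into an Arrow table. The path is illustrative:
import pandas as pd
import pyarrow as pa

pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}).to_pickle("/tmp/example.pkl")

with open("/tmp/example.pkl", "rb") as f:
    table = pa.Table.from_pandas(pd.read_pickle(f))
print(table.schema)  # a: int64, b: string (plus pandas metadata)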
| 138
| 0
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
_A = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
_A = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
_A = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self , A_ , A_ , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="m2m100" , A_ = None , A_=8 , **A_ , ) -> None:
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase =language_codes
__UpperCamelCase =FAIRSEQ_LANGUAGE_CODES[language_codes]
__UpperCamelCase ={lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
__UpperCamelCase =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A_ )
for lang_code in fairseq_language_code
if self.get_lang_token(A_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A_ , tgt_lang=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , language_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A_ , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =load_json(A_ )
__UpperCamelCase ={v: k for k, v in self.encoder.items()}
__UpperCamelCase =spm_file
__UpperCamelCase =load_spm(A_ , self.sp_model_kwargs )
__UpperCamelCase =len(self.encoder )
__UpperCamelCase ={
self.get_lang_token(A_ ): self.encoder_size + i for i, lang_code in enumerate(A_ )
}
__UpperCamelCase ={lang_code: self.encoder_size + i for i, lang_code in enumerate(A_ )}
__UpperCamelCase ={v: k for k, v in self.lang_token_to_id.items()}
__UpperCamelCase =src_lang if src_lang is not None else 'en'
__UpperCamelCase =tgt_lang
__UpperCamelCase =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__UpperCamelCase =num_madeup_words
@property
def _a ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _a ( self ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self , A_ ) -> None:
__UpperCamelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> Optional[Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A_ , self.encoder[self.unk_token] )
def _a ( self , A_ ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A_ , self.unk_token )
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =[]
__UpperCamelCase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
__UpperCamelCase =[]
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
__UpperCamelCase =[1] * len(self.prefix_tokens )
__UpperCamelCase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self ) -> Dict:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> None:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
__UpperCamelCase =Path(A_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , A_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A_ )
elif not os.path.isfile(self.spm_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
return (str(A_ ), str(A_ ))
def _a ( self , A_ , A_ = "en" , A_ = None , A_ = "ro" , **A_ , ) -> BatchEncoding:
__UpperCamelCase =src_lang
__UpperCamelCase =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def _a ( self , A_ , A_ , A_ , **A_ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase =src_lang
__UpperCamelCase =self(A_ , add_special_tokens=A_ , **A_ )
__UpperCamelCase =self.get_lang_id(A_ )
__UpperCamelCase =tgt_lang_id
return inputs
def _a ( self ) -> List[Any]:
self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> str:
return self.lang_code_to_token[lang]
def _a ( self , A_ ) -> int:
__UpperCamelCase =self.get_lang_token(A_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict[str, Any] ):
__UpperCamelCase =sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE__ )
spm.Load(str(SCREAMING_SNAKE_CASE__ ) )
return spm
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'r' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , indent=2 )
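A hedged end-to-end sketch of the src/tgt language plumbing above, using the public checkpoint named in the vocab map (downloads weights on first run):
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

inputs = tokenizer("La vie est belle.", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))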
| 682
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
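A minimal sketch of the OCR path exercised above, written against the real class name LayoutLMv3ImageProcessor (apply_ocr=True additionally requires a working pytesseract install; the blank image is a stand-in for a scanned document):
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=True)
image = Image.new("RGB", (640, 480), "white")
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
print(encoding.words, encoding.boxes)  # no words expected on a blank page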
| 682
| 1
|
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
__lowercase : int = logging.get_logger(__name__)
__lowercase : Optional[int] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__lowercase : Dict = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _A :
"""simple docstring"""
UpperCamelCase_ : str = field(
default=_UpperCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(_UpperCAmelCase )} )
UpperCamelCase_ : str = field(
default=_UpperCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
UpperCamelCase_ : int = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ : int = field(
default=1_2_8 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
UpperCamelCase_ : int = field(
default=6_4 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
UpperCamelCase_ : int = field(
default=3_0 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
UpperCamelCase_ : bool = field(
default=_UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase_ : bool = field(
default=_UpperCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
UpperCamelCase_ : float = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
UpperCamelCase_ : int = field(
default=2_0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
UpperCamelCase_ : int = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
UpperCamelCase_ : int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
# Load data features from cache or dataset file
__snake_case = "v2" if args.version_2_with_negative else "v1"
__snake_case = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case = cached_features_file + ".lock"
with FileLock(A_ ):
if os.path.exists(A_ ) and not args.overwrite_cache:
__snake_case = time.time()
__snake_case = torch.load(A_ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
''' future run''' )
else:
if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
    def __len__(self) -> int:
return len(self.features )
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
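
# --- Hedged usage sketch (illustrative, not part of the original module) ---
# How SquadDataset above is typically constructed; the checkpoint and data_dir
# below are hypothetical placeholders.
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = SquadDataTrainingArguments(data_dir="path/to/squad")  # hypothetical path
#   train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   batch = train_dataset[0]  # dict with input_ids / attention_mask / token_type_ids tensors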
| 564
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 620
| 0
|
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_ddpm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 429
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False,
    proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs,
):
    """
    Loads the feature extractor configuration from a pretrained model feature extractor configuration.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    r"""
    This is a generic feature extractor class that will be instantiated as one of the feature extractor classes of
    the library when created with the `AutoFeatureExtractor.from_pretrained` class method. This class cannot be
    instantiated directly using `__init__()` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """
        Register a new feature extractor for this class.
        """
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
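
# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Typical entry points for AutoFeatureExtractor above; the checkpoint name is
# only an example.
#
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   # Custom extractors can be made discoverable through the register hook:
#   # AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)  # hypothetical classes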
| 429
| 1
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers.
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 163
|
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )
@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to what data we are going to input our model for training and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )
@dataclasses.dataclass
class STTrainingArguments:
    """Training arguments pertaining to the self-training loop itself."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo labeled data for the next self-training iteration."""

    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-train a pre-trained model on a downstream task."""

    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(eval_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{best_iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
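
# --- Hedged usage sketch (illustrative, not part of the original script) ---
# How selftrain() above is meant to be driven; all paths below are hypothetical.
#
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="data/train.csv",
#       infer_file="data/infer.csv",
#       output_dir="output/self-training",
#       eval_file="data/eval.csv",          # forwarded through **kwargs onto args
#       evaluation_strategy="epoch",
#       max_selftrain_iterations=3,
#   )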
| 163
| 1
|
def catalan_numbers(upper_limit: int) -> "list[int]":
    """
    Return the Catalan number sequence from 0 through upper_limit.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
            N = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
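
# Hedged cross-check (illustrative addition, not in the original): Catalan numbers
# also satisfy the closed form C(n) = binomial(2n, n) / (n + 1).
import math


def _catalan_closed_form(n: int) -> int:  # hypothetical helper name
    return math.comb(2 * n, n) // (n + 1)


assert [_catalan_closed_form(i) for i in range(6)] == catalan_numbers(5) == [1, 1, 2, 5, 14, 42]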
| 715
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an UperNet semantic segmentation model.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self):
        """
        Serializes this instance to a Python dictionary.
        """
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
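
# --- Hedged usage sketch (illustrative, not part of the original file) ---
# Shows how the config above is typically constructed and serialized; relies only
# on names defined in this module.
#
#   config = UperNetConfig()                      # falls back to the default ResNet backbone
#   assert config.to_dict()["model_type"] == "upernet"
#   assert config.to_dict()["backbone_config"]["model_type"] == "resnet"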
| 546
| 0
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objs_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an extension array of type ImageExtensionType."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
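
# --- Hedged usage sketch (illustrative addition) ---
# Round-trips a tiny array through the encode/decode helpers defined above;
# assumes this module is importable as datasets.features.image.
#
#   import numpy as np
#   from datasets.features.image import Image
#
#   feature = Image()
#   encoded = feature.encode_example(np.zeros((4, 4, 3), dtype=np.uint8))   # -> {"path": None, "bytes": ...}
#   decoded = feature.decode_example(encoded)                               # -> PIL.Image.Image
#   assert decoded.size == (4, 4)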
| 393
|
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """
    This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.
    """

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )
        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        """
        Returns the set value or `default` if no value is set.
        """
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        """
        Deletes a sub-section of the config file if it's found. Unless `must_exist` is `True`
        the section doesn't have to exist.
        """
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        """
        Returns `True`/`False` only if the value is set, always `False` otherwise.
        """
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        """
        Returns `True`/`False` only if the value is set, always `False` otherwise.
        """
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class snake_case :
'''simple docstring'''
def __init__( self : List[Any] , lowerCAmelCase_ : List[str] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = engine
def _lowercase ( self : int , lowerCAmelCase_ : str , **lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.engine.backward(lowerCAmelCase_ , **lowerCAmelCase_ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class snake_case ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : List[str] ) -> List[str]:
"""simple docstring"""
super().__init__(lowerCAmelCase_ , device_placement=lowerCAmelCase_ , scaler=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = hasattr(self.optimizer , '''overflow''' )
def _lowercase ( self : Optional[int] , lowerCAmelCase_ : Optional[int]=None ) -> Any:
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _lowercase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _lowercase ( self : List[str] ) -> str:
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class snake_case ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> str:
"""simple docstring"""
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self : List[str] ) -> str:
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class snake_case :
'''simple docstring'''
def __init__( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=0.001 , lowerCAmelCase_ : Union[str, Any]=0 , **lowerCAmelCase_ : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = params
SCREAMING_SNAKE_CASE_ = lr
SCREAMING_SNAKE_CASE_ = weight_decay
SCREAMING_SNAKE_CASE_ = kwargs
class snake_case :
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Union[str, Any]=0 , **lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = optimizer
SCREAMING_SNAKE_CASE_ = total_num_steps
SCREAMING_SNAKE_CASE_ = warmup_num_steps
SCREAMING_SNAKE_CASE_ = kwargs
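# Minimal usage sketch (added for illustration; assumes the classes above): dotted
# keys walk the nested config dict, so stage and offload can be read back directly.
#
#     ds_config = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
#     ds_config.get_value("zero_optimization.stage")  # -> 3
#     ds_config.is_zero3()                            # -> True
#     ds_config.is_offload()                          # -> True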
| 393
| 1
|
class Graph:
    def __init__(self):
        # adjacency list: vertex -> list of neighbouring vertices
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 709
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily merge adjacent src/tgt pairs until the next candidate would exceed max_tokens."""
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
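# Toy walkthrough (added; the tokenizer below is a hypothetical stand-in): with a
# whitespace "tokenizer" and max_tokens=4, adjacent pairs are merged greedily until
# the next candidate would overflow.
#
#     import torch
#     class ToyTok:
#         def __call__(self, text, return_tensors=None):
#             return type("Enc", (), {"input_ids": torch.zeros(1, len(text.split()))})()
#
#     pack_examples(ToyTok(), ["a b", "c", "d e f"], ["x", "y", "z"], max_tokens=4)
#     # -> (["a b c", "d e f"], ["x y", "z"])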
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"""packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.""")
        Path(save_path / f"""{split}.source""").open("w").write("\n".join(packed_src))
        Path(save_path / f"""{split}.target""").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        shutil.copyfile(src_path, save_path / f"""{split}.source""")
        shutil.copyfile(tgt_path, save_path / f"""{split}.target""")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
| 458
| 0
|
import json
import sys
def format_json_to_md(input_json_file, output_md_file) -> None:
    """Render a JSON benchmark report as a collapsible markdown table."""
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
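# Example of the emitted markdown (added for illustration): one benchmark with a
# single "runtime" metric whose new/old/diff values are 1.2/1.0/0.2 renders as:
#
#   ### Benchmark: my_benchmark.json
#   | metric | runtime |
#   |--------|---|
#   | new / old (diff) | 1.200000 / 1.000000 (0.200000) |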
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 79
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self) -> None:
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"""got {self.rope_scaling}""")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""")
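# Illustrative sketch (added): a valid `rope_scaling` value carries exactly the two
# fields validated above, e.g. doubling the usable context with linear scaling:
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})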
| 426
| 0
|
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
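# Worked example (added): with values [3, 1] on positions 0 and 1, round i = 0 pairs
# them up; position 0 keeps min(3, 1) = 1 and position 1 keeps max(3, 1) = 3, so a
# single compare-exchange already sorts this two-element list.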
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 716
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float, )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 518
| 0
|
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: at each index take the better of skipping or taking the item."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
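# Illustrative check (added): weights [1, 2, 4, 5] and values [5, 4, 8, 6] with
# max_weight 5 admit taking items 0 and 2 (weight 1 + 4 = 5), so the recursion
# returns 5 + 8 = 13:
#
#     assert knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) == 13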
if __name__ == "__main__":
import doctest
doctest.testmod()
| 317
|
"""simple docstring"""
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Solve y' = f(x, y) with the classic fourth-order Runge-Kutta method."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
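# Quick sanity check (added): for y' = y with y(0) = 1, the RK4 estimate at x = 1
# should match e = 2.71828... to within roughly 1e-4 even at this coarse step size:
#
#     y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.2, 1.0)
#     # y[-1] is approximately 2.71828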
if __name__ == "__main__":
import doctest
doctest.testmod()
| 499
| 0
|
ROMAN = [
(1000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int(roman: str) -> int:
    """Convert a roman numeral to an integer, handling subtractive pairs such as IV and CM."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a roman numeral using the greedy table in ROMAN."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711
|
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model, model_args, output_path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models(model_path, output_path, opset, fp16=False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset, )
del vae_decoder
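# Note on the dynamic axes above (added): marking batch/channels/height/width as
# dynamic lets the exported decoder accept latents of other spatial sizes at
# inference time (e.g. 64x64 latents for 512x512 images), not just the 25x25 dummy
# used for tracing.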
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 15
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 245
|
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` from British coins via bottom-up dynamic programming."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
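# Small worked case (added): for pence = 5 and coins [1, 2, 5] the table fills in as
# number_of_ways = [1, 1, 2, 2, 3, 4], i.e. 4 ways to make 5p:
# 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1.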
if __name__ == "__main__":
assert solution(200) == 7_3682
| 54
| 0
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self) -> None:
        token = "<s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)

    def test_full_tokenizer(self) -> None:
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."], )
@slow
def __UpperCamelCase ( self : str) -> List[str]:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE = {'input_ids': [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def __UpperCamelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCAmelCase__)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__)
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCAmelCase__)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__)
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__)
SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCAmelCase__)
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__)
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCAmelCase__)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__))
shutil.rmtree(lowerCAmelCase__)
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__)
SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCAmelCase__)
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCAmelCase__)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__))
shutil.rmtree(lowerCAmelCase__)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self) -> None:
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self) -> None:
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self) -> None:
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self) -> None:
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self) -> None:
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffacted_by_save_load(self) -> None:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self) -> None:
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self) -> None:
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self) -> None:
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self) -> None:
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")
        self.assertEqual(
            nested_simplify(inputs), {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            }, )
| 259
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__UpperCAmelCase = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt")
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}])
        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ], )
        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]])
        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ], )
        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs), [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ], )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt", device=torch.device("cpu"), )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf")
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}], )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs), [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N], )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs), {"label": ANY(str), "score": ANY(float)}, )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}], )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 259
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation of GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
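# Worked example (added): for seq_length = 10 and window_size = 4 the candidates are
# [1, 2, 3]; 10 is divisible by 1 and 2, so the largest divisor is 2 and the
# sequence splits into 10 // 2 = 5 blocks:
#
#     # custom_get_block_length_and_num_blocks(torch.tensor(10), 4) -> (tensor(2), tensor(5))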
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
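# Shape sketch (added): with batch = 2, seqlen = 3, 16 heads and hidden_size = 2048,
# each dummy past key/value tensor generated above has shape
# (2, 16, 3 + 2, 2048 // 16) == (2, 16, 5, 128).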
| 617
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
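# Hypothetical usage sketch (added): PipelineTool instances are callable and chain
# encode -> forward -> decode, so the tool maps a raw waveform to its transcription:
#
#     # tool = SpeechToTextTool()
#     # text = tool(audio)   # `audio`: a mono waveform array sampled at 16 kHz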
| 336
| 0
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case_ ( _A):
lowerCamelCase :List[str] = ["image_processor", "tokenizer"]
lowerCamelCase :Any = "ViltImageProcessor"
lowerCamelCase :Optional[int] = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
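    # __call__ routes the text through the tokenizer and the images through the
    # image processor, then merges both encodings into a single BatchEncoding.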
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
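# TimesformerConfig stores the hyper-parameters of a TimeSformer video model:
# frame/patch geometry plus the usual transformer sizes, and which space-time
# attention variant to use ("divided_space_time" by default).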
class snake_case_ ( PretrainedConfig):
    model_type = "timesformer"

    def __init__( self , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , num_frames=8 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
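# Standard lazy-import layout: the import structure is declared up front, real
# imports only happen under TYPE_CHECKING, and at runtime the module is swapped
# for a _LazyModule that resolves attributes on first access.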
_import_structure = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_plbart"""] = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_plbart"""] = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
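# Shared test mixin for sequence (speech) feature extractors: it checks
# batching, padding, truncation, attention masks, and numpy/torch/tf parity.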
class UpperCamelCase_ ( FeatureExtractionSavingTestMixin ):
    # to overwrite at feature extractor specific tests
    feature_extraction_class = None
    feat_extract_tester = None

    @property
    def feat_extract_dict( self ):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feat_extract , 'feature_size' ) )
        self.assertTrue(hasattr(feat_extract , 'sampling_rate' ) )
        self.assertTrue(hasattr(feat_extract , 'padding_value' ) )

    def test_batch_feature( self ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs , processed_features[input_name] ) ) )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )

    @require_torch
    def test_batch_feature_pt( self ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )

    @require_tf
    def test_batch_feature_tf( self ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    def _check_padding( self , numpify=False ):
        def _inputs_have_equal_length(input ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
                    return False
            return True

        def _inputs_are_equal(input_a , input_b ):
            if len(input_a ) != len(input_b ):
                return False
            for input_slice_a, input_slice_b in zip(input_a , input_b ):
                if not np.allclose(np.asarray(input_slice_a ) , np.asarray(input_slice_b ) , atol=1E-3 ):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowerCamelCase_ : int = feat_extract.pad(lowerCamelCase , padding=lowerCamelCase )
lowerCamelCase_ : Tuple = input_a[input_name]
lowerCamelCase_ : List[str] = feat_extract.pad(lowerCamelCase , padding='longest' )
lowerCamelCase_ : Optional[Any] = input_a[input_name]
lowerCamelCase_ : List[Any] = feat_extract.pad(lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[-1] ) )
lowerCamelCase_ : List[str] = input_a[input_name]
lowerCamelCase_ : Union[str, Any] = feat_extract.pad(lowerCamelCase , padding='longest' , return_tensors='np' )
lowerCamelCase_ : int = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding='max_length' )[input_name]
lowerCamelCase_ : Union[str, Any] = feat_extract.pad(
lowerCamelCase , padding='max_length' , max_length=lowerCamelCase , return_tensors='np' )
lowerCamelCase_ : Dict = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowerCamelCase_ : Optional[Any] = feat_extract.pad(lowerCamelCase , pad_to_multiple_of=10 )
lowerCamelCase_ : Dict = input_a[input_name]
lowerCamelCase_ : List[Any] = feat_extract.pad(lowerCamelCase , padding='longest' , pad_to_multiple_of=10 )
lowerCamelCase_ : Union[str, Any] = input_a[input_name]
lowerCamelCase_ : str = feat_extract.pad(
lowerCamelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCamelCase )
lowerCamelCase_ : List[str] = input_a[input_name]
lowerCamelCase_ : Any = feat_extract.pad(
lowerCamelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCamelCase , return_tensors='np' , )
lowerCamelCase_ : str = input_a[input_name]
self.assertTrue(all(len(lowerCamelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
lowerCamelCase_ : Dict = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCamelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowerCamelCase_ : Dict = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
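    # _check_truncation below mirrors _check_padding but exercises `truncation=True`
    # together with the different padding strategies and `pad_to_multiple_of`.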
    def _check_truncation( self , numpify=False ):
        def _inputs_have_equal_length(input ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
                    return False
            return True

        def _inputs_are_equal(input_a , input_b ):
            if len(input_a ) != len(input_b ):
                return False
            for input_slice_a, input_slice_b in zip(input_a , input_b ):
                if not np.allclose(np.asarray(input_slice_a ) , np.asarray(input_slice_b ) , atol=1E-3 ):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowerCamelCase_ : Dict = feat_extract.pad(
lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=lowerCamelCase )
lowerCamelCase_ : int = input_a[input_name]
lowerCamelCase_ : str = feat_extract.pad(lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) )
lowerCamelCase_ : Union[str, Any] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
# truncate to smallest with np
lowerCamelCase_ : Optional[int] = feat_extract.pad(
lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=lowerCamelCase , )
lowerCamelCase_ : List[str] = input_a[input_name]
lowerCamelCase_ : int = feat_extract.pad(
lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
lowerCamelCase_ : Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
# truncate to middle
lowerCamelCase_ : Tuple = feat_extract.pad(
lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=lowerCamelCase , return_tensors='np' , )
lowerCamelCase_ : Union[str, Any] = input_a[input_name]
lowerCamelCase_ : str = feat_extract.pad(
lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=lowerCamelCase )
lowerCamelCase_ : Any = input_a[input_name]
lowerCamelCase_ : int = feat_extract.pad(
lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
lowerCamelCase_ : Optional[int] = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , truncation=lowerCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding='longest' , truncation=lowerCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding='longest' , truncation=lowerCamelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding='max_length' , truncation=lowerCamelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowerCamelCase_ : str = 12
lowerCamelCase_ : List[Any] = feat_extract.pad(
lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , )
lowerCamelCase_ : List[str] = input_a[input_name]
lowerCamelCase_ : List[Any] = feat_extract.pad(
lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCamelCase , )
lowerCamelCase_ : Dict = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowerCamelCase_ : Optional[Any] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowerCamelCase_ : str = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
    def test_padding_from_list( self ):
        self._check_padding(numpify=False )

    def test_padding_from_array( self ):
        self._check_padding(numpify=True )

    def test_truncation_from_list( self ):
        self._check_truncation(numpify=False )

    def test_truncation_from_array( self ):
        self._check_truncation(numpify=True )

    @require_torch
    def test_padding_accepts_tensors_pt( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        input_np = feat_extract.pad(processed_features , padding='longest' , return_tensors='np' )[input_name]
        input_pt = feat_extract.pad(processed_features , padding='longest' , return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )

    @require_tf
    def test_padding_accepts_tensors_tf( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        input_np = feat_extract.pad(processed_features , padding='longest' , return_tensors='np' )[input_name]
        input_tf = feat_extract.pad(processed_features , padding='longest' , return_tensors='tf' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1E-2 )

    def test_attention_mask( self ):
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        processed = feat_extract.pad(processed , padding='longest' , return_tensors='np' )
        self.assertIn('attention_mask' , processed )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , input_lengths )

    def test_attention_mask_with_truncation( self ):
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        max_length = min(input_lengths )
        processed_pad = feat_extract.pad(
            processed , padding='max_length' , max_length=max_length , truncation=True , return_tensors='np' )
        self.assertIn('attention_mask' , processed_pad )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 2_5_0_0_0_4
RO_CODE = 2_5_0_0_2_0
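# EN_CODE / RO_CODE are the fairseq-style language-code token ids that MBart
# appends after the EOS token (English and Romanian entries near the end of the
# 250k-entry vocabulary).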
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained( self ):
        """simple docstring"""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list = [(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname )
                # Save tokenizer rust, legacy_format=True
                tmpdirname = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname )
                # Save tokenizer rust, legacy_format=False
                tmpdirname = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
    checkpoint_name = """facebook/mbart-large-en-ro"""
    src_text = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    expected_src_tokens = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
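    # `expected_src_tokens` ends with the EOS id (2) followed by EN_CODE: MBart
    # puts the source language code *after* the EOS token on the encoder side.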
@classmethod
def UpperCAmelCase_ ( cls ) -> Dict:
"""simple docstring"""
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        cls.pad_token_id = 1
return cls
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250_020 )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_enro_tokenizer_decode_ignores_language_codes( self ):
        """simple docstring"""
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_enro_tokenizer_truncation( self ):
        """simple docstring"""
        src_text = ['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , EN_CODE )
        self.assertEqual(len(ids ) , desired_max_length )
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250_026, 250_001] )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='''pt''' )
        batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
        batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
        src_tokens = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , src_tokens )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='''pt''' )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='''pt''' )
        labels = targets['''input_ids''']
        batch['''decoder_input_ids'''] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
        inputs = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(inputs ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3_034, 2, 250_004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250_001,
} , )
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
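# These tests cross-load checkpoints between frameworks: TF classes read PyTorch
# weights with `from_pt=True`, PyTorch classes read TF weights with `from_tf=True`,
# and each auto class should resolve to the matching BERT/GPT-2/T5 architecture.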
@is_pt_tf_cross_test
class lowerCamelCase (unittest.TestCase ):
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModel.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertModel )
            model = AutoModel.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertModel )

    @slow
    def test_model_for_pretraining_from_pretrained( self ):
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForPreTraining.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForPreTraining )
            model = AutoModelForPreTraining.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForPreTraining )

    @slow
    def test_model_for_causal_lm( self ):
        """simple docstring"""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , GPTaConfig )
            model = TFAutoModelForCausalLM.from_pretrained(model_name , from_pt=True )
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFGPTaLMHeadModel )
            model = AutoModelForCausalLM.from_pretrained(model_name , from_tf=True )
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , GPTaLMHeadModel )

    @slow
    def test_lmhead_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelWithLMHead.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
            model = AutoModelWithLMHead.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForMaskedLM )

    @slow
    def test_model_for_masked_lm( self ):
        """simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForMaskedLM.from_pretrained(model_name , from_pt=True )
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
            model = AutoModelForMaskedLM.from_pretrained(model_name , from_tf=True )
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForMaskedLM )

    @slow
    def test_model_for_encoder_decoder_lm( self ):
        """simple docstring"""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , TaConfig )
            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name , from_pt=True )
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFTaForConditionalGeneration )
            model = AutoModelForSeqaSeqLM.from_pretrained(model_name , from_tf=True )
            model, loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TaForConditionalGeneration )

    @slow
    def test_sequence_classification_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForSequenceClassification )
            model = AutoModelForSequenceClassification.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForSequenceClassification )

    @slow
    def test_question_answering_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForQuestionAnswering )
            model = AutoModelForQuestionAnswering.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForQuestionAnswering )
    def test_from_pretrained_identifier( self ):
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , BertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
    def test_from_identifier_from_model_type( self ):
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """simple docstring"""

    def __init__( self , initial_capacity = 6 ):
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity )

    def create_linked_list( self , initial_capacity ):
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity ):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty( self ):
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first( self ):
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue( self , data ):
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue( self ):
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation( self ):
        if self.is_empty():
            raise Exception('''Empty Queue''' )

    def check_is_full( self ):
        if self.rear and self.rear.next == self.front:
            raise Exception('''Full Queue''' )

class Node:
    """simple docstring"""

    def __init__( self ):
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
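# Illustrative usage of the fixed-capacity circular queue above:
#
#   queue = CircularQueueLinkedList(initial_capacity=3)
#   queue.enqueue("a")
#   queue.enqueue("b")
#   assert queue.first() == "a"
#   assert queue.dequeue() == "a"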
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_snake_case = logging.get_logger(__name__)
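# This image processor resizes to a shortest edge, center-crops, rescales to
# [0, 1], and flips the channel order from RGB to BGR, since the pretrained
# checkpoints expect BGR input (hence no mean/std normalization here).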
class _SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
    '''simple docstring'''

    model_input_names = ["pixel_values"]

    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_flip_channel_order : bool = True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 256, 'width': 256}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PIL.Image.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )

    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def flip_channel_order( self , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
        """simple docstring"""
        return flip_channel_order(image , data_format=data_format )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_flip_channel_order : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes : List[Tuple] = None ):
        """simple docstring"""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
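# GPT-NeoX Japanese differs from the base GPT-NeoX configuration mainly in
# `intermediate_multiple_size` (which sizes the feed-forward layer as a multiple
# of `hidden_size`) and in having separate attention/hidden dropout rates.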
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    '''simple docstring'''

    model_type = "gpt_neox_japanese"

    def __init__( self , vocab_size=32_000 , hidden_size=2_560 , num_hidden_layers=32 , num_attention_heads=32 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=10_000 , max_position_embeddings=2_048 , initializer_range=0.02 , layer_norm_eps=1E-5 , use_cache=True , bos_token_id=31_996 , eos_token_id=31_999 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
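# The tester class below builds a tiny YOLOS config plus random pixel values and
# per-image target dicts (class labels + boxes) for the model tests that follow.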
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Any=13 , _snake_case : Union[str, Any]=[30, 30] , _snake_case : Optional[int]=2 , _snake_case : str=3 , _snake_case : List[str]=True , _snake_case : Tuple=True , _snake_case : Union[str, Any]=32 , _snake_case : List[str]=5 , _snake_case : str=4 , _snake_case : Tuple=37 , _snake_case : Tuple="gelu" , _snake_case : str=0.1 , _snake_case : Tuple=0.1 , _snake_case : Optional[Any]=10 , _snake_case : Dict=0.0_2 , _snake_case : int=3 , _snake_case : Optional[int]=None , _snake_case : str=8 , _snake_case : Dict=10 , ) -> int:
"""simple docstring"""
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = type_sequence_label_size
A_ = initializer_range
A_ = num_labels
A_ = scope
A_ = n_targets
A_ = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A_ = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A_ = num_patches + 1 + self.num_detection_tokens
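        # Worked example with the defaults above: image_size=[30, 30] and patch_size=2 give
        # num_patches = 15 * 15 = 225, so expected_seq_len = 225 + 1 + 10 = 236.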
def lowerCamelCase__ ( self : Any ) -> str:
"""simple docstring"""
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A_ = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A_ = []
for i in range(self.batch_size ):
A_ = {}
A_ = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_snake_case )
A_ = torch.rand(self.n_targets , 4 , device=_snake_case )
labels.append(_snake_case )
A_ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def lowerCamelCase__ ( self : Any , _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] ) -> List[str]:
"""simple docstring"""
A_ = YolosModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A_ = model(_snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def lowerCamelCase__ ( self : Tuple , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : Any ) -> Optional[int]:
"""simple docstring"""
A_ = YolosForObjectDetection(_snake_case )
model.to(_snake_case )
model.eval()
A_ = model(pixel_values=_snake_case )
A_ = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A_ = model(pixel_values=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def lowerCamelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
A_ = self.prepare_config_and_inputs()
A_ , A_ , A_ = config_and_inputs
A_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
snake_case = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
snake_case = False
snake_case = False
snake_case = False
snake_case = False
def lowerCamelCase__ ( self : int , _snake_case : Optional[int] , _snake_case : Any , _snake_case : Dict=False ) -> List[Any]:
"""simple docstring"""
A_ = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
A_ = []
for i in range(self.model_tester.batch_size ):
A_ = {}
A_ = torch.ones(
size=(self.model_tester.n_targets,) , device=_snake_case , dtype=torch.long )
A_ = torch.ones(
self.model_tester.n_targets , 4 , device=_snake_case , dtype=torch.float )
labels.append(_snake_case )
A_ = labels
return inputs_dict
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
A_ = YolosModelTester(self )
A_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
# YOLOS does not use inputs_embeds
pass
def lowerCamelCase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_snake_case )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _snake_case )
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
# in YOLOS, the seq_len is different
A_ = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = True
A_ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A_ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A_ = outputs.attentions
self.assertEqual(len(_snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A_ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A_ = outputs.attentions
self.assertEqual(len(_snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A_ = len(_snake_case )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A_ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A_ = 1
self.assertEqual(out_len + added_hidden_states , len(_snake_case ) )
A_ = outputs.attentions
self.assertEqual(len(_snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(_snake_case : str , _snake_case : Dict , _snake_case : Dict ):
A_ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A_ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A_ = outputs.hidden_states
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_snake_case ) , _snake_case )
# YOLOS has a different seq_length
A_ = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def lowerCamelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_snake_case )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = YolosModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A_ ():
'''simple docstring'''
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self : str ) -> Any:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
A_ = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(_snake_case )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=_snake_case , return_tensors="pt" ).to(_snake_case )
# forward pass
with torch.no_grad():
A_ = model(inputs.pixel_values )
# verify outputs
A_ = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , _snake_case )
A_ = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=_snake_case , )
A_ = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _snake_case , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _snake_case , atol=1e-4 ) )
# verify postprocessing
A_ = image_processor.post_process_object_detection(
_snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A_ = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(_snake_case )
A_ = [75, 75, 17, 63, 17]
A_ = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(_snake_case )
self.assertEqual(len(results["scores"] ) , 5 )
self.assertTrue(torch.allclose(results["scores"] , _snake_case , atol=1e-4 ) )
self.assertSequenceEqual(results["labels"].tolist() , _snake_case )
self.assertTrue(torch.allclose(results["boxes"][0, :] , _snake_case ) )
| 115
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def A_ (__a , __a , __a , __a , __a ):
'''simple docstring'''
with open(__a ) as metadata_file:
A_ = json.load(__a )
A_ = LukeConfig(use_entity_aware_attention=__a , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
A_ = torch.load(__a , map_location="cpu" )
# Load the entity vocab file
A_ = load_entity_vocab(__a )
A_ = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("<ent>" , lstrip=__a , rstrip=__a )
A_ = AddedToken("<ent2>" , lstrip=__a , rstrip=__a )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(__a )
with open(os.path.join(__a , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(__a , __a )
A_ = LukeTokenizer.from_pretrained(__a )
# Initialize the embeddings of the special tokens
A_ = state_dict["embeddings.word_embeddings.weight"]
A_ = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
A_ = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
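    # The two rows appended above seed the new <ent>/<ent2> embeddings from the "@" and "#"
    # word embeddings, matching the `config.vocab_size += 2` adjustment made earlier.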
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = f'encoder.layer.{layer_index}.attention.self.'
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["entity_embeddings.entity_embeddings.weight"]
A_ = entity_emb[entity_vocab["[MASK]"]]
A_ = LukeModel(config=__a ).eval()
A_ , A_ = model.load_state_dict(__a , strict=__a )
if not (len(__a ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'Missing keys {", ".join(__a )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
A_ = LukeTokenizer.from_pretrained(__a , task="entity_classification" )
A_ = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
A_ = (39, 42)
A_ = tokenizer(__a , entity_spans=[span] , add_prefix_space=__a , return_tensors="pt" )
A_ = model(**__a )
# Verify word hidden states
if model_size == "large":
A_ = torch.Size((1, 42, 1024) )
A_ = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
A_ = torch.Size((1, 42, 768) )
A_ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
A_ = torch.Size((1, 1, 1024) )
A_ = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
A_ = torch.Size((1, 1, 768) )
A_ = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __a , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__a ) )
model.save_pretrained(__a )
def A_ (__a ):
'''simple docstring'''
A_ = {}
with open(__a , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(__a ):
A_ , A_ = line.rstrip().split("\t" )
A_ = index
return entity_vocab
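# Illustrative vocab line (assumed TSV layout, consistent with the split("\t") above):
#   "[MASK]\t12345"  ->  entity_vocab["[MASK]"] = <line index>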
if __name__ == "__main__":
UpperCamelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
UpperCamelCase_ : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 115
| 1
|
"""simple docstring"""
def UpperCAmelCase ( ):
'''simple docstring'''
return [list(range(1000 - i, -1000 - i, -1 ) ) for i in range(1000 )]
_A = generate_large_matrix()
_A = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def UpperCAmelCase ( a_ ):
'''simple docstring'''
assert all(row == sorted(a_, reverse=a_ ) for row in grid )
assert all(list(a_ ) == sorted(a_, reverse=a_ ) for col in zip(*a_ ) )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = 0
lowerCamelCase : Optional[int] = len(a_ ) - 1
    # Edge cases: an empty row, or a row that starts negative (so every value is negative).

if not array or array[0] < 0:
return 0
while right + 1 > left:
lowerCamelCase : Tuple = (left + right) // 2
lowerCamelCase : List[str] = array[mid]
            # mid is the first negative index: its value is negative and its predecessor is non-negative.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
lowerCamelCase : List[Any] = mid + 1
else:
lowerCamelCase : List[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(a_ )
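# Worked example (hand-checked): in the descending row [4, 2, 0, -1, -3] the first negative
# value sits at index 3, so find_negative_index returns 3; for [7, 7, 6] (no negatives) it
# returns len(array) == 3.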
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = 0
lowerCamelCase : List[str] = len(grid[0] )
for i in range(len(a_ ) ):
lowerCamelCase : Tuple = find_negative_index(grid[i][:bound] )
total += bound
return (len(a_ ) * len(grid[0] )) - total
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(a_ ):
if number < 0:
total += len(a_ ) - i
break
return total
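# Sanity check (computed by hand): for [[3, 2], [1, 0]] all three counting strategies above
# return 0, and for [[7, 7, 6], [-1, -2, -3]] they all return 3.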
def UpperCAmelCase ( ):
'''simple docstring'''
from timeit import timeit
print('Running benchmarks' )
lowerCamelCase : Dict = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
lowerCamelCase : List[str] = timeit(F"""{func}(grid=grid)""", setup=a_, number=500 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 133
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class _lowercase ( __UpperCAmelCase ):
lowercase_ = 'swin2sr'
lowercase_ = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , UpperCAmelCase_=64 , UpperCAmelCase_=1 , UpperCAmelCase_=3 , UpperCAmelCase_=180 , UpperCAmelCase_=[6, 6, 6, 6, 6, 6] , UpperCAmelCase_=[6, 6, 6, 6, 6, 6] , UpperCAmelCase_=8 , UpperCAmelCase_=2.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=2 , UpperCAmelCase_=1.0 , UpperCAmelCase_="1conv" , UpperCAmelCase_="pixelshuffle" , **UpperCAmelCase_ , ) -> Union[str, Any]:
super().__init__(**UpperCAmelCase_ )
lowerCamelCase : int = image_size
lowerCamelCase : Tuple = patch_size
lowerCamelCase : Union[str, Any] = num_channels
lowerCamelCase : List[Any] = embed_dim
lowerCamelCase : int = depths
lowerCamelCase : Any = len(UpperCAmelCase_ )
lowerCamelCase : Tuple = num_heads
lowerCamelCase : Optional[int] = window_size
lowerCamelCase : str = mlp_ratio
lowerCamelCase : Tuple = qkv_bias
lowerCamelCase : Optional[Any] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Optional[Any] = drop_path_rate
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : str = use_absolute_embeddings
lowerCamelCase : Dict = layer_norm_eps
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Optional[int] = upscale
lowerCamelCase : List[Any] = img_range
lowerCamelCase : Optional[Any] = resi_connection
lowerCamelCase : Union[str, Any] = upsampler
| 133
| 1
|
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: bool = True ,__UpperCamelCase: float = math.inf ,__UpperCamelCase: float = -math.inf ,__UpperCamelCase: float = math.inf ,__UpperCamelCase: float = -math.inf ,__UpperCamelCase: bool = False ,__UpperCamelCase: float = 1_00 ,__UpperCamelCase: float = 0.0_1 ,__UpperCamelCase: float = 1 ,):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Any = search_prob
SCREAMING_SNAKE_CASE : Any = start_temperate
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : Tuple = None
while not search_end:
SCREAMING_SNAKE_CASE : str = current_state.score()
if best_state is None or current_score > best_state.score():
SCREAMING_SNAKE_CASE : Any = current_state
scores.append(__UpperCamelCase )
iterations += 1
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : List[str] = current_state.get_neighbors()
while (
next_state is None and neighbors
        ): # until we find a neighbor we can move to, or run out of candidates
SCREAMING_SNAKE_CASE : Optional[int] = random.randint(0 ,len(__UpperCamelCase ) - 1 ) # picking a random neighbor
SCREAMING_SNAKE_CASE : Union[str, Any] = neighbors.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
SCREAMING_SNAKE_CASE : str = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
SCREAMING_SNAKE_CASE : str = picked_neighbor
else:
SCREAMING_SNAKE_CASE : List[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
SCREAMING_SNAKE_CASE : List[Any] = picked_neighbor
SCREAMING_SNAKE_CASE : Any = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
SCREAMING_SNAKE_CASE : Tuple = True
else:
SCREAMING_SNAKE_CASE : Dict = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__UpperCamelCase ) ,__UpperCamelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
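# Worked example of the acceptance rule above: a worsening move with change = -2 at
# current_temp = 100 is kept with probability e**(-2 / 100) ≈ 0.98, while the same move at
# current_temp = 1 survives only with probability e**(-2) ≈ 0.14.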
if __name__ == "__main__":
def lowercase__( __UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Any ):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
UpperCamelCase_ = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
UpperCamelCase_ = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
UpperCamelCase_ = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
UpperCamelCase_ = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: str ):
"""simple docstring"""
return (3 * x**2) - (6 * y)
UpperCamelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCamelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F"""{local_min.score()}"""
)
UpperCamelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCamelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F"""{local_min.score()}"""
)
| 28
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = StableDiffusionXLImgaImgPipeline
A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
A : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, )
SCREAMING_SNAKE_CASE : int = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A )
SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : str = image / 2 + 0.5
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
# forward without prompt embeds
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt
SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']]
SCREAMING_SNAKE_CASE : int = sd_pipe(**A )
SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )]
        ( SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, ) : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
**A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A )
SCREAMING_SNAKE_CASE : str = pipe(**A ).images
SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 28
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : Optional[int] = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , UpperCamelCase_ = 101 ):
lowercase_ :Tuple = length
def __len__( self ):
return self.length
def __getitem__( self , UpperCamelCase_ ):
return i
class UpperCamelCase :
'''simple docstring'''
def __call__( self , UpperCamelCase_ ):
return {"input_ids": torch.tensor(UpperCamelCase_ ), "labels": torch.tensor(UpperCamelCase_ )}
class UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
lowercase_ :List[Any] = nn.Linear(120 , 80 )
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=None ):
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
@require_torch_neuroncore
def UpperCamelCase ( self ):
lowercase_ :int = f"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
lowercase_ :List[str] = self.get_auto_remove_tmp_dir()
lowercase_ :List[str] = f"--output_dir {output_dir}".split()
lowercase_ :Any = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(UpperCamelCase_ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
@require_torch_multi_gpu
def UpperCamelCase ( self ):
lowercase_ :Any = f"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
lowercase_ :str = self.get_auto_remove_tmp_dir()
lowercase_ :List[str] = f"--output_dir {output_dir}".split()
lowercase_ :Dict = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(UpperCamelCase_ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser((TrainingArguments,))
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
SCREAMING_SNAKE_CASE : Dict = DummyDataset(dataset_length)
def UpperCamelCase ( _a ) -> Dict:
'''simple docstring'''
lowercase_ :Optional[Any] = list(range(len(_a ) ) )
lowercase_ :Optional[Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" )
return {"success": success}
SCREAMING_SNAKE_CASE : Optional[Any] = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
SCREAMING_SNAKE_CASE : Any = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE : Optional[int] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
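    # Assumption (based on the upstream Trainer distributed-test script): the bare
    # assignment below sets trainer.args.eval_accumulation_steps to 2, and the later
    # one resets it to None, before re-running evaluate()/predict(); the obfuscated
    # targets hide the attribute name.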
SCREAMING_SNAKE_CASE : Union[str, Any] = 2
SCREAMING_SNAKE_CASE : Any = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE : List[str] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE : Optional[int] = None
| 441
| 0
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
_UpperCAmelCase : Optional[int] = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
_UpperCAmelCase : Union[str, Any] = {
'''facebook/bart-base''': 10_24,
'''facebook/bart-large''': 10_24,
'''facebook/bart-large-mnli''': 10_24,
'''facebook/bart-large-cnn''': 10_24,
'''facebook/bart-large-xsum''': 10_24,
'''yjernite/bart_eli5''': 10_24,
}
@lru_cache()
def UpperCamelCase ( ) -> int:
'''simple docstring'''
lowercase =(
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowercase =bs[:]
lowercase =0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase_ )
cs.append(2**8 + n )
n += 1
lowercase =[chr(lowercase_ ) for n in cs]
return dict(zip(lowercase_ , lowercase_ ) )
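# Behaviour note (hand-checked against the original, pre-obfuscation logic): printable
# bytes map to themselves, e.g. bytes_to_unicode()[ord("a")] == "a", while non-printable
# bytes are shifted into the chr(256 + n) range so every byte gets a distinct, visible
# unicode character.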
def UpperCamelCase ( lowercase_ : int ) -> Optional[int]:
'''simple docstring'''
lowercase =set()
lowercase =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase =char
return pairs
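# Intended behaviour (the obfuscated assignments above hide the accumulator names):
# for the symbol tuple ("l", "o", "w") the function yields {("l", "o"), ("o", "w")},
# i.e. the set of adjacent symbol pairs used to look up BPE merge ranks.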
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ['input_ids', 'attention_mask']
def __init__( self , snake_case_ , snake_case_ , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , **snake_case_ , ):
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
super().__init__(
errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , **snake_case_ , )
with open(snake_case_ , encoding='''utf-8''' ) as vocab_handle:
lowercase =json.load(snake_case_ )
lowercase ={v: k for k, v in self.encoder.items()}
lowercase =errors # how to handle errors in decoding
lowercase =bytes_to_unicode()
lowercase ={v: k for k, v in self.byte_encoder.items()}
with open(snake_case_ , encoding='''utf-8''' ) as merges_handle:
lowercase =merges_handle.read().split('''\n''' )[1:-1]
lowercase =[tuple(merge.split() ) for merge in bpe_merges]
lowercase =dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
lowercase ={}
lowercase =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase =re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def _A( self ):
return len(self.encoder )
def _A( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _A( self , snake_case_ ):
if token in self.cache:
return self.cache[token]
lowercase =tuple(snake_case_ )
lowercase =get_pairs(snake_case_ )
if not pairs:
return token
while True:
lowercase =min(snake_case_ , key=lambda snake_case_ : self.bpe_ranks.get(snake_case_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase =bigram
lowercase =[]
lowercase =0
while i < len(snake_case_ ):
try:
lowercase =word.index(snake_case_ , snake_case_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase =j
if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase =tuple(snake_case_ )
lowercase =new_word
if len(snake_case_ ) == 1:
break
else:
lowercase =get_pairs(snake_case_ )
lowercase =''' '''.join(snake_case_ )
lowercase =word
return word
def _A( self , snake_case_ ):
lowercase =[]
for token in re.findall(self.pat , snake_case_ ):
lowercase =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case_ ).split(''' ''' ) )
return bpe_tokens
def _A( self , snake_case_ ):
return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) )
def _A( self , snake_case_ ):
return self.decoder.get(snake_case_ )
def _A( self , snake_case_ ):
lowercase =''''''.join(snake_case_ )
lowercase =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _A( self , snake_case_ , snake_case_ = None ):
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase =os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase =os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + '''\n''' )
lowercase =0
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowercase =token_index
writer.write(''' '''.join(snake_case_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _A( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase =[self.cls_token_id]
lowercase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
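        # Resulting layout (BART-style, assuming cls=<s> and sep=</s>):
        #   single sequence: <s> A </s>
        #   pair:            <s> A </s> </s> B </s>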
def _A( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
def _A( self , snake_case_ , snake_case_ = None ):
lowercase =[self.sep_token_id]
lowercase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _A( self , snake_case_ , snake_case_=False , **snake_case_ ):
lowercase =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case_ ) > 0 and not text[0].isspace()):
lowercase =''' ''' + text
return (text, kwargs)
| 72
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__UpperCamelCase : List[str] = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__UpperCamelCase : Union[str, Any] = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists needs to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__UpperCamelCase : str = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
 word_order (int): Word n-gram order. If equal to `2`, the metric is referred to as chrF++. Defaults to `0`.
 beta (int): Determines the importance of recall w.r.t. precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
 'word_order' (int): The word n-gram order. If equal to 2, the metric is referred to as chrF++,
 'beta' (int): Determines the importance of recall w.r.t. precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = CHRF.CHAR_ORDER , lowerCamelCase = CHRF.WORD_ORDER , lowerCamelCase = CHRF.BETA , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = False , ) ->Union[str, Any]:
'''simple docstring'''
__a = len(references[0] )
if any(len(lowerCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
__a = [[refs[i] for refs in references] for i in range(lowerCamelCase )]
__a = CHRF(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = sb_chrf.corpus_score(lowerCamelCase , lowerCamelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 448
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
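# A minimal sketch (dummy ids, no real vocabulary) of the pair layout built by
# `build_inputs_with_special_tokens` / `create_token_type_ids_from_sequences`:
# [CLS] A [SEP] B [SEP], with token_type_ids 0 for segment A and 1 for segment B.
if __name__ == "__main__":
    _cls, _sep, _seq_a, _seq_b = [101], [102], [7, 8], [9]
    _token_type_ids = len(_cls + _seq_a + _sep) * [0] + len(_seq_b + _sep) * [1]
    assert _token_type_ids == [0, 0, 0, 0, 1, 1]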
| 57
|
def apply_table(inp, table):
    """Permute the bits of ``inp`` according to the 1-indexed positions in ``table``."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate a bit string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """S-box lookup: the outer bits of ``data`` pick the row, the middle bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]
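# Illustrative check of the S-box addressing above (this table matches the s0
# box used in the __main__ block below): for input "1011" the outer bits "11"
# select row 3 and the middle bits "01" select column 1.
_demo_sbox = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
assert apply_sbox(_demo_sbox, "1011") == "1"  # s[3][1] == 1
assert apply_sbox(_demo_sbox, "0110") == "10"  # s[0][3] == 2 -> "10"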
def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES (S-DES)."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741  pad S-box output to 2 bits
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 57
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
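# A small illustration (added) of the `pad_to_multiple_of` logic above: padding
# the longest sequence up to a multiple of 8 keeps tensor shapes friendly for
# mixed-precision kernels. The length below is an arbitrary example.
def _round_up_to_multiple(length: int, multiple: int) -> int:
    return ((length + multiple - 1) // multiple) * multiple


if __name__ == "__main__":
    assert _round_up_to_multiple(37, 8) == 40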
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
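# A minimal, self-contained sketch (added; this is *not* Accelerate's
# implementation) of the bookkeeping `accelerator.accumulate(model)` performs:
# losses are scaled, gradients accumulate over several micro-batches, and the
# optimizer steps once per group. All values below are illustrative.
if __name__ == "__main__":
    _grad, _steps_taken = 0.0, 0
    _micro_batch_losses = [0.5, 0.25, 0.75, 1.0]
    _accum_steps = 2
    for _step, _loss in enumerate(_micro_batch_losses):
        _grad += _loss / _accum_steps  # stands in for loss.backward()
        if (_step + 1) % _accum_steps == 0:
            _steps_taken += 1  # stands in for optimizer.step()
            _grad = 0.0  # stands in for optimizer.zero_grad()
    assert _steps_taken == 2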
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 12
|
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
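# Illustrative call (not from this script; the module, shapes, and axis names
# below are assumptions) showing how the wrapper above is typically used:
#
#   onnx_export(
#       my_module,                              # any torch.nn.Module
#       model_args=(torch.randn(1, 3, 224, 224),),
#       output_path=Path("out/model.onnx"),
#       ordered_input_names=["pixel_values"],
#       output_names=["logits"],
#       dynamic_axes={"pixel_values": {0: "batch"}, "logits": {0: "batch"}},
#       opset=14,
#   )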
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)
    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights exceed the 2 GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, clip_image_size, clip_image_size, clip_num_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
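# Example invocation (script name, paths, and model id are placeholders):
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path CompVis/stable-diffusion-v1-4 --output_path ./sd_onnx --opset 14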
| 258
| 0
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 634
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
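# A minimal usage sketch (column names are illustrative): the template remaps
# custom dataset columns onto the canonical "text"/"summary" schema.
if __name__ == "__main__":
    _task = Summarization(text_column="article", summary_column="highlights")
    assert _task.column_mapping == {"article": "text", "highlights": "summary"}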
| 634
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
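# A minimal usage sketch (values are illustrative): `attention_window` may be a
# single int or one entry per hidden layer.
if __name__ == "__main__":
    _config = LongformerConfig(attention_window=[32] * 12, num_hidden_layers=12)
    assert len(_config.attention_window) == _config.num_hidden_layers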
| 71
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
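# Illustrative note: with the lazy structure above, a heavy backend is only
# imported on first attribute access, e.g.
#   from transformers.models.roformer import RoFormerModel  # torch is imported here,
#                                                           # not at package import time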
| 28
| 0
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 704
|
import unittest
import numpy as np
def schur_complement(
    mat_a,
    mat_b,
    mat_c,
    pseudo_inv=None,
):
    """Compute the Schur complement S = C - B.T @ A^{-1} @ B of the block matrix [[A, B], [B.T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
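# Quick illustrative check (added) of the identity the tests below rely on:
# det([[A, B], [B.T, C]]) == det(A) * det(S), with S the Schur complement.
if __name__ == "__main__":
    _a = np.array([[2.0, 1.0], [1.0, 2.0]])
    _b = np.array([[1.0], [0.0]])
    _c = np.array([[3.0]])
    _s = schur_complement(_a, _b, _c)
    _m = np.block([[_a, _b], [_b.T, _c]])
    assert np.isclose(np.linalg.det(_m), np.linalg.det(_a) * np.linalg.det(_s))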
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
| 433
| 0
|
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
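# Minimal usage sketch (requires TensorFlow and network access; the checkpoint
# name is the standard public mT5 checkpoint, shown here as an illustration):
#   model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")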
| 230
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 230
| 1
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
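# Quick illustrative check (added, not part of the test suite; assumes flax is
# available) of the dummy-input convention used above: a uniform random batch
# shaped (batch, channels, height, width) = (4, 3, 32, 32).
if __name__ == "__main__":
    _key = jax.random.PRNGKey(0)
    _sample = jax.random.uniform(_key, ((4, 3) + (32, 32)))
    assert _sample.shape == (4, 3, 32, 32)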
| 149
|
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for ``query`` and download up to ``max_images`` results."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
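# Illustrative note on the double "unicode-escape" decode above: URLs inside the
# AF_initDataCallback payload are escaped twice. The payload fragment below is a
# made-up example.
if __name__ == "__main__":
    _twice = "https://example.com/img\\\\u003d1"
    _once = bytes(_twice, "ascii").decode("unicode-escape")
    assert bytes(_once, "ascii").decode("unicode-escape") == "https://example.com/img=1"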
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F'{image_count} images were downloaded to disk.')
except IndexError:
print('''Please provide a search term.''')
raise
| 149
| 1
|
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
A__ : Optional[int] = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
A__ : Optional[Any] = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
A__ : Tuple = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 286
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a ( __lowercase ):
SCREAMING_SNAKE_CASE__ : jnp.ndarray
@flax_register_to_config
class a ( nn.Module ,__lowercase ,__lowercase ):
SCREAMING_SNAKE_CASE__ : int = 32
SCREAMING_SNAKE_CASE__ : int = 4
SCREAMING_SNAKE_CASE__ : int = 4
SCREAMING_SNAKE_CASE__ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
SCREAMING_SNAKE_CASE__ : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
SCREAMING_SNAKE_CASE__ : Union[bool, Tuple[bool]] = False
SCREAMING_SNAKE_CASE__ : Tuple[int] = (320, 640, 1280, 1280)
SCREAMING_SNAKE_CASE__ : int = 2
SCREAMING_SNAKE_CASE__ : Union[int, Tuple[int]] = 8
SCREAMING_SNAKE_CASE__ : Optional[Union[int, Tuple[int]]] = None
SCREAMING_SNAKE_CASE__ : int = 1280
SCREAMING_SNAKE_CASE__ : float = 0.0
SCREAMING_SNAKE_CASE__ : bool = False
SCREAMING_SNAKE_CASE__ : jnp.dtype = jnp.floataa
SCREAMING_SNAKE_CASE__ : bool = True
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : bool = False
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = (1, self.in_channels, self.sample_size, self.sample_size)
__SCREAMING_SNAKE_CASE: Tuple = jnp.zeros(_lowerCAmelCase , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE: Optional[Any] = jnp.ones((1,) , dtype=jnp.intaa )
__SCREAMING_SNAKE_CASE: Optional[int] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Optional[int] = jax.random.split(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )["params"]
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = self.block_out_channels
__SCREAMING_SNAKE_CASE: Union[str, Any] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__SCREAMING_SNAKE_CASE: Any = self.num_attention_heads or self.attention_head_dim
# input
__SCREAMING_SNAKE_CASE: str = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__SCREAMING_SNAKE_CASE: int = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__SCREAMING_SNAKE_CASE: Union[str, Any] = FlaxTimestepEmbedding(_lowerCAmelCase , dtype=self.dtype )
__SCREAMING_SNAKE_CASE: Optional[int] = self.only_cross_attention
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Union[str, Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Any = (num_attention_heads,) * len(self.down_block_types )
# down
__SCREAMING_SNAKE_CASE: Union[str, Any] = []
__SCREAMING_SNAKE_CASE: List[str] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__SCREAMING_SNAKE_CASE: List[str] = output_channel
__SCREAMING_SNAKE_CASE: str = block_out_channels[i]
__SCREAMING_SNAKE_CASE: Any = i == len(_lowerCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__SCREAMING_SNAKE_CASE: str = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__SCREAMING_SNAKE_CASE: Tuple = FlaxDownBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: str = down_blocks
# mid
__SCREAMING_SNAKE_CASE: Union[str, Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__SCREAMING_SNAKE_CASE: Optional[int] = []
__SCREAMING_SNAKE_CASE: Tuple = list(reversed(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: Optional[int] = list(reversed(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: str = list(reversed(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: Optional[int] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__SCREAMING_SNAKE_CASE: int = output_channel
__SCREAMING_SNAKE_CASE: List[str] = reversed_block_out_channels[i]
__SCREAMING_SNAKE_CASE: List[str] = reversed_block_out_channels[min(i + 1 , len(_lowerCAmelCase ) - 1 )]
__SCREAMING_SNAKE_CASE: Union[str, Any] = i == len(_lowerCAmelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__SCREAMING_SNAKE_CASE: Optional[int] = FlaxCrossAttnUpBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , prev_output_channel=_lowerCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__SCREAMING_SNAKE_CASE: int = FlaxUpBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , prev_output_channel=_lowerCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = output_channel
__SCREAMING_SNAKE_CASE: Union[str, Any] = up_blocks
# out
__SCREAMING_SNAKE_CASE: Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__SCREAMING_SNAKE_CASE: Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase = True , _lowerCAmelCase = False , ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , jnp.ndarray ):
__SCREAMING_SNAKE_CASE: Dict = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
__SCREAMING_SNAKE_CASE: Optional[Any] = timesteps.astype(dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE: Union[str, Any] = jnp.expand_dims(_lowerCAmelCase , 0 )
__SCREAMING_SNAKE_CASE: Any = self.time_proj(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.time_embedding(_lowerCAmelCase )
# 2. pre-process
__SCREAMING_SNAKE_CASE: int = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) )
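        # Note: Flax's nn.Conv expects channels-last (NHWC) inputs, while the NCHW
        # layout is used at the model interface, hence this transpose and the inverse
        # one in step 6 below.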
__SCREAMING_SNAKE_CASE: List[str] = self.conv_in(_lowerCAmelCase )
# 3. down
__SCREAMING_SNAKE_CASE: Dict = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Optional[Any] = down_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
else:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Tuple = down_block(_lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__SCREAMING_SNAKE_CASE: Union[str, Any] = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples , down_block_additional_residuals ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__SCREAMING_SNAKE_CASE: Union[str, Any] = new_down_block_res_samples
# 4. mid
__SCREAMING_SNAKE_CASE: Dict = self.mid_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__SCREAMING_SNAKE_CASE: Union[str, Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
__SCREAMING_SNAKE_CASE: Union[str, Any] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Any = up_block(
_lowerCAmelCase , temb=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , res_hidden_states_tuple=_lowerCAmelCase , deterministic=not train , )
else:
__SCREAMING_SNAKE_CASE: List[str] = up_block(_lowerCAmelCase , temb=_lowerCAmelCase , res_hidden_states_tuple=_lowerCAmelCase , deterministic=not train )
# 6. post-process
__SCREAMING_SNAKE_CASE: Optional[Any] = self.conv_norm_out(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = nn.silu(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Tuple = self.conv_out(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = jnp.transpose(_lowerCAmelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowerCAmelCase )
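# --- Illustrative sketch (standalone; not part of the model above) ---
# The skip-connection bookkeeping in __call__ is easy to misread: each down block
# appends its intermediate activations to one flat tuple, and each up block then
# slices `layers_per_block + 1` of them back off the end, last-in-first-out.
# A minimal sketch of that pattern, with purely illustrative names:
def _demo_skip_connection_bookkeeping(num_blocks=3, layers_per_block=2):
    res_stack = ("initial_sample",)
    for block in range(num_blocks):
        # in the real model each down block contributes roughly layers_per_block + 1 residuals
        res_stack += tuple(f"res_{block}_{i}" for i in range(layers_per_block + 1))
    for block in range(num_blocks):
        consumed = res_stack[-(layers_per_block + 1):]
        res_stack = res_stack[: -(layers_per_block + 1)]
        print(f"up block {block} consumes {consumed}")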
| 202
| 0
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = {}
if "candidate_labels" in kwargs:
_lowercase : Union[str, Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_lowercase : int = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = load_image(UpperCamelCase_ )
_lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
_lowercase : Optional[Any] = candidate_labels
_lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels]
_lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
_lowercase : Any = [text_inputs]
return inputs
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = model_inputs.pop('candidate_labels' )
_lowercase : List[str] = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] , UserDict ):
_lowercase : Optional[int] = text_inputs[0]
else:
# Batching case.
_lowercase : List[str] = text_inputs[0][0]
_lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = model_outputs.pop('candidate_labels' )
_lowercase : Optional[int] = model_outputs['logits'][0]
if self.framework == "pt":
_lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
_lowercase : Tuple = probs.tolist()
            if not isinstance(scores , list ):
_lowercase : List[Any] = [scores]
elif self.framework == "tf":
_lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 )
_lowercase : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowercase : List[Any] = [
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(probs , candidate_labels ) , key=lambda x : -x[0] )
]
return result
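# --- Illustrative usage (a sketch; the model name and image URL are examples,
# and running it needs network access to download the weights) ---
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# preds = classifier(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["two cats", "a dog", "a plane"],
# )
# # -> [{"score": ..., "label": "two cats"}, ...], sorted by descending score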
| 4
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : int =logging.get_logger(__name__)
_A : Union[str, Any] ={
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_vision_model"""
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : str = num_attention_heads
_lowercase : Tuple = patch_size
_lowercase : Dict = image_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = attention_dropout
_lowercase : int = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : str = qkv_bias
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_qformer"""
def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : Optional[int] = hidden_act
_lowercase : Union[str, Any] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : Optional[int] = initializer_range
_lowercase : Tuple = layer_norm_eps
_lowercase : List[str] = position_embedding_type
_lowercase : str = cross_attention_frequency
_lowercase : int = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Optional[int] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip"""
A_ = True
def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if vision_config is None:
_lowercase : Any = {}
            logger.info('vision_config is None. Initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
_lowercase : List[Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
_lowercase : List[Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : int = self.text_config.is_encoder_decoder
_lowercase : Tuple = num_query_tokens
_lowercase : str = self.vision_config.hidden_size
_lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : List[Any] = 1.0
_lowercase : int = 0.02
@classmethod
def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = copy.deepcopy(self.__dict__ )
_lowercase : Optional[int] = self.vision_config.to_dict()
_lowercase : Optional[Any] = self.qformer_config.to_dict()
_lowercase : Tuple = self.text_config.to_dict()
_lowercase : Dict = self.__class__.model_type
return output
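# --- Illustrative usage (a sketch; in the upstream library the classes keep their
# original names and the composing classmethod is `from_vision_qformer_text_configs`) ---
# from transformers import (
#     InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, OPTConfig,
# )
# vision = InstructBlipVisionConfig()
# qformer = InstructBlipQFormerConfig()
# text = OPTConfig()
# config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
# config.to_dict()  # serializes the nested vision/qformer/text configs, as above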
| 4
| 1
|
"""simple docstring"""
import copy
import re
class UpperCamelCase__ :
"""simple docstring"""
__UpperCAmelCase = '''hp'''
__UpperCAmelCase = {}
__UpperCAmelCase = None
@classmethod
def a__ ( cls : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict ):
'''simple docstring'''
__magic_name__ = prefix
__magic_name__ = defaults
cls.build_naming_info()
@staticmethod
def a__ ( UpperCamelCase_ : int , UpperCamelCase_ : Any ):
'''simple docstring'''
if len(__snake_case ) == 0:
return ""
__magic_name__ = None
if any(char.isdigit() for char in word ):
raise Exception(f"""Parameters should not contain numbers: \'{word}\' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__snake_case ) + 1 ):
__magic_name__ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
__magic_name__ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(UpperCamelCase_ : List[str] ):
__magic_name__ = ''
while integer != 0:
__magic_name__ = chr(ord('A' ) + integer % 1_0 ) + s
integer //= 1_0
return s
__magic_name__ = 0
while True:
__magic_name__ = word + '#' + int_to_alphabetic(__snake_case )
if sword in info["reverse_short_word"]:
continue
else:
__magic_name__ = sword
break
__magic_name__ = short_word
__magic_name__ = word
return short_word
@staticmethod
def a__ ( UpperCamelCase_ : int , UpperCamelCase_ : List[str] ):
'''simple docstring'''
__magic_name__ = param_name.split('_' )
__magic_name__ = [TrialShortNamer.shortname_for_word(__snake_case , __snake_case ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
__magic_name__ = ['', '_']
for separator in separators:
__magic_name__ = separator.join(__snake_case )
if shortname not in info["reverse_short_param"]:
__magic_name__ = shortname
__magic_name__ = param_name
return shortname
return param_name
@staticmethod
def a__ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] ):
'''simple docstring'''
__magic_name__ = TrialShortNamer.shortname_for_key(__snake_case , __snake_case )
__magic_name__ = short_name
__magic_name__ = param_name
@classmethod
def a__ ( cls : Optional[Any] ):
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
__magic_name__ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
__magic_name__ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__snake_case , __snake_case )
__magic_name__ = info
@classmethod
def a__ ( cls : Optional[Any] , UpperCamelCase_ : Tuple ):
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
__magic_name__ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
__magic_name__ = cls.NAMING_INFO['short_param'][k]
if isinstance(__snake_case , __snake_case ):
__magic_name__ = 1 if v else 0
__magic_name__ = '' if isinstance(__snake_case , (int, float) ) else '-'
__magic_name__ = f"""{key}{sep}{v}"""
name.append(__snake_case )
return "_".join(__snake_case )
@classmethod
def a__ ( cls : Any , UpperCamelCase_ : str ):
'''simple docstring'''
__magic_name__ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
__magic_name__ = []
else:
__magic_name__ = repr.split('_' )
__magic_name__ = {}
for value in values:
if "-" in value:
__magic_name__ , __magic_name__ = value.split('-' )
else:
__magic_name__ = re.sub('[0-9.]' , '' , __snake_case )
__magic_name__ = float(re.sub('[^0-9.]' , '' , __snake_case ) )
__magic_name__ = cls.NAMING_INFO['reverse_short_param'][p_k]
__magic_name__ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
__magic_name__ = cls.DEFAULTS[k]
return parameters
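# --- Illustrative sketch (standalone; the method names in the class above were
# garbled by renaming, so this re-implements the core idea with hypothetical names) ---
# Non-default hyperparameters are compressed into a short run name such as "hp_lr0.1":
def _demo_short_name(params, defaults, prefix="hp"):
    parts = [prefix]
    for key, value in params.items():
        if value == defaults.get(key):
            continue  # default values are omitted from the name
        short = "".join(word[0] for word in key.split("_"))  # naive word shortening
        sep = "" if isinstance(value, (int, float)) else "-"
        parts.append(f"{short}{sep}{value}")
    return "_".join(parts)

# _demo_short_name({"learning_rate": 0.1, "batch_size": 8},
#                  {"learning_rate": 0.01, "batch_size": 8})  # -> "hp_lr0.1"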
| 545
|
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : torch.FloatTensor
UpperCAmelCase__ : Optional[torch.FloatTensor] = None
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Any=0.999 , __lowerCAmelCase : Tuple="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__lowerCAmelCase : Optional[int] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__lowerCAmelCase : int ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
a__ = []
for i in range(__lowerCAmelCase ):
a__ = i / num_diffusion_timesteps
a__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__lowerCAmelCase ) / alpha_bar_fn(__lowerCAmelCase ) , __lowerCAmelCase ) )
return torch.tensor(__lowerCAmelCase , dtype=torch.floataa )
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ : Optional[int] = 1
@register_to_config
def __init__( self :Any ,__snake_case :int = 10_00 ,__snake_case :float = 0.00_01 ,__snake_case :float = 0.02 ,__snake_case :str = "linear" ,__snake_case :Optional[Union[np.ndarray, List[float]]] = None ,__snake_case :bool = True ,__snake_case :bool = True ,__snake_case :int = 0 ,__snake_case :str = "epsilon" ,__snake_case :float = 1.0 ,**__snake_case :str ,) -> List[Any]:
if kwargs.get('set_alpha_to_one' ,__snake_case ) is not None:
a__ = (
'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
)
deprecate('set_alpha_to_one' ,'1.0.0' ,__snake_case ,standard_warn=__snake_case )
a__ = kwargs['set_alpha_to_one']
if trained_betas is not None:
a__ = torch.tensor(__snake_case ,dtype=torch.floataa )
elif beta_schedule == "linear":
a__ = torch.linspace(__snake_case ,__snake_case ,__snake_case ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
a__ = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,__snake_case ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
a__ = betas_for_alpha_bar(__snake_case )
else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}' )
a__ = 1.0 - self.betas
a__ = torch.cumprod(self.alphas ,dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
a__ = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
a__ = 1.0
# setable values
a__ = None
a__ = torch.from_numpy(np.arange(0 ,__snake_case ).copy().astype(np.intaa ) )
def lowerCamelCase__( self :Optional[Any] ,__snake_case :torch.FloatTensor ,__snake_case :Optional[int] = None ) -> torch.FloatTensor:
return sample
def lowerCamelCase__( self :Dict ,__snake_case :int ,__snake_case :Union[str, torch.device] = None ) -> Union[str, Any]:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'
F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'
F' maximal {self.config.num_train_timesteps} timesteps.' )
a__ = num_inference_steps
a__ = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
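        # e.g. num_train_timesteps=1000 with num_inference_steps=50 gives step_ratio=20,
        # so timesteps becomes [0, 20, 40, ..., 980] (before steps_offset is added below)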
a__ = (np.arange(0 ,__snake_case ) * step_ratio).round().copy().astype(np.intaa )
a__ = torch.from_numpy(__snake_case ).to(__snake_case )
self.timesteps += self.config.steps_offset
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :torch.FloatTensor ,__snake_case :int ,__snake_case :torch.FloatTensor ,__snake_case :float = 0.0 ,__snake_case :bool = False ,__snake_case :Optional[torch.FloatTensor] = None ,__snake_case :bool = True ,) -> Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
a__ = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
a__ = self.alphas_cumprod[timestep]
a__ = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
a__ = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
a__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
a__ = model_output
elif self.config.prediction_type == "sample":
a__ = model_output
a__ = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
a__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
a__ = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'
' `v_prediction`' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
a__ = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
a__ = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
a__ = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=__snake_case ,pred_original_sample=__snake_case )
def __len__( self :Tuple ) -> Optional[int]:
return self.config.num_train_timesteps
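# --- Illustrative sketch (standalone apart from the math/torch imports already at the
# top of this file; it mirrors the "squaredcos_cap_v2" branch above) ---
# The Glide cosine schedule sets alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2
# and derives each beta_i as 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta.
def _demo_betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)

# _demo_betas_for_alpha_bar(1000)[:3]  # betas start tiny and grow toward max_beta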
| 335
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
# Load configuration defined in the metadata file
with open(lowerCamelCase ) as metadata_file:
UpperCamelCase_: Dict = json.load(lowerCamelCase )
UpperCamelCase_: Optional[int] = LukeConfig(use_entity_aware_attention=lowerCamelCase , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
UpperCamelCase_: Optional[int] = torch.load(lowerCamelCase , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
UpperCamelCase_: str = load_original_entity_vocab(lowerCamelCase )
# add an entry for [MASK2]
UpperCamelCase_: Tuple = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
UpperCamelCase_: str = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCamelCase_: Any = AddedToken("""<ent>""" , lstrip=lowerCamelCase , rstrip=lowerCamelCase )
UpperCamelCase_: Optional[Any] = AddedToken("""<ent2>""" , lstrip=lowerCamelCase , rstrip=lowerCamelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowerCamelCase )
with open(os.path.join(lowerCamelCase , """tokenizer_config.json""" ) , """r""" ) as f:
UpperCamelCase_: Optional[Any] = json.load(lowerCamelCase )
UpperCamelCase_: Optional[Any] = """MLukeTokenizer"""
with open(os.path.join(lowerCamelCase , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
with open(os.path.join(lowerCamelCase , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[int] = MLukeTokenizer.from_pretrained(lowerCamelCase )
# Initialize the embeddings of the special tokens
UpperCamelCase_: Optional[Any] = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
UpperCamelCase_: Tuple = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
UpperCamelCase_: List[str] = state_dict["""embeddings.word_embeddings.weight"""]
UpperCamelCase_: int = word_emb[ent_init_index].unsqueeze(0 )
UpperCamelCase_: Optional[Any] = word_emb[enta_init_index].unsqueeze(0 )
UpperCamelCase_: Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
UpperCamelCase_: List[str] = state_dict[bias_name]
UpperCamelCase_: List[str] = decoder_bias[ent_init_index].unsqueeze(0 )
UpperCamelCase_: Any = decoder_bias[enta_init_index].unsqueeze(0 )
UpperCamelCase_: Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCamelCase_: Any = F'''encoder.layer.{layer_index}.attention.self.'''
UpperCamelCase_: Union[str, Any] = state_dict[prefix + matrix_name]
UpperCamelCase_: int = state_dict[prefix + matrix_name]
UpperCamelCase_: Optional[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCamelCase_: List[str] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
UpperCamelCase_: List[Any] = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
UpperCamelCase_: List[Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
UpperCamelCase_: str = state_dict["""entity_predictions.bias"""]
UpperCamelCase_: Optional[Any] = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
UpperCamelCase_: List[str] = torch.cat([entity_prediction_bias, entity_mask_bias] )
UpperCamelCase_: List[str] = LukeForMaskedLM(config=lowerCamelCase ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
UpperCamelCase_: Any = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
UpperCamelCase_: Dict = state_dict[key]
else:
UpperCamelCase_: Any = state_dict[key]
UpperCamelCase_, UpperCamelCase_: Any = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
if set(lowerCamelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(missing_keys ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
UpperCamelCase_: int = MLukeTokenizer.from_pretrained(lowerCamelCase , task="""entity_classification""" )
UpperCamelCase_: Optional[int] = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
UpperCamelCase_: Any = (0, 9)
UpperCamelCase_: Optional[int] = tokenizer(lowerCamelCase , entity_spans=[span] , return_tensors="""pt""" )
UpperCamelCase_: Any = model(**lowerCamelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase_: Optional[Any] = torch.Size((1, 33, 7_68) )
UpperCamelCase_: List[str] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase_: Any = torch.Size((1, 1, 7_68) )
UpperCamelCase_: List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
UpperCamelCase_: List[str] = MLukeTokenizer.from_pretrained(lowerCamelCase )
UpperCamelCase_: List[Any] = """Tokyo is the capital of <mask>."""
UpperCamelCase_: Union[str, Any] = (24, 30)
UpperCamelCase_: str = tokenizer(lowerCamelCase , entity_spans=[span] , return_tensors="""pt""" )
UpperCamelCase_: Dict = model(**lowerCamelCase )
UpperCamelCase_: str = encoding["""input_ids"""][0].tolist()
UpperCamelCase_: str = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
UpperCamelCase_: Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCamelCase )
UpperCamelCase_: Optional[int] = outputs.entity_logits[0][0].argmax().item()
UpperCamelCase_: Optional[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(lowerCamelCase ) )
model.save_pretrained(lowerCamelCase )
def A__ ( lowerCamelCase ) -> str:
UpperCamelCase_: Optional[int] = ["""[MASK]""", """[PAD]""", """[UNK]"""]
    UpperCamelCase_: Optional[Any] = [json.loads(line ) for line in open(lowerCamelCase )]
UpperCamelCase_: Union[str, Any] = {}
for entry in data:
UpperCamelCase_: Any = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
UpperCamelCase_: List[str] = entity_id
break
UpperCamelCase_: Union[str, Any] = F'''{language}:{entity_name}'''
UpperCamelCase_: Dict = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
lowerCamelCase_ : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
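# --- Example invocation (a sketch; the script name and all paths are placeholders) ---
# python convert_mluke_checkpoint.py \
#     --checkpoint_path /path/to/pytorch_model.bin \
#     --metadata_path /path/to/metadata.json \
#     --entity_vocab_path /path/to/entity_vocab.jsonl \
#     --pytorch_dump_folder_path ./mluke-base \
#     --model_size base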
| 670
|
import cva
import numpy as np
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , snake_case_ : float , snake_case_ : int ):
if k in (0.04, 0.06):
UpperCamelCase_: Union[str, Any] = k
UpperCamelCase_: Union[str, Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : int ):
return str(self.k )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : str ):
UpperCamelCase_: int = cva.imread(snake_case_ , 0 )
UpperCamelCase_, UpperCamelCase_: List[Any] = img.shape
UpperCamelCase_: list[list[int]] = []
UpperCamelCase_: int = img.copy()
        UpperCamelCase_: Any = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        UpperCamelCase_, UpperCamelCase_: List[Any] = np.gradient(img )
UpperCamelCase_: Optional[Any] = dx**2
UpperCamelCase_: Dict = dy**2
UpperCamelCase_: Optional[Any] = dx * dy
UpperCamelCase_: str = 0.04
UpperCamelCase_: int = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
UpperCamelCase_: List[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: int = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = (wxx * wyy) - (wxy**2)
UpperCamelCase_: Optional[int] = wxx + wyy
UpperCamelCase_: Dict = det - k * (trace**2)
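                # Harris response: r = det(M) - k * trace(M)^2, where M is the 2x2
                # structure tensor summed over the window; a large positive r marks
                # a corner, r < 0 an edge, and |r| near zero a flat region.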
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = HarrisCorner(0.04, 3)
lowerCamelCase_ , lowerCamelCase_ : Any = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 670
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( __magic_name__ ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> None:
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
| 18
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> Any:
snake_case_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a , 'embed_dim' ) )
self.parent.assertTrue(hasattr(a , 'num_heads' ) )
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , a , a=13 , a=64 , a=3 , a=[16, 48, 96] , a=[1, 3, 6] , a=[1, 2, 10] , a=[7, 3, 3] , a=[4, 2, 2] , a=[2, 1, 1] , a=[2, 2, 2] , a=[False, False, True] , a=[0.0, 0.0, 0.0] , a=0.02 , a=1E-12 , a=True , a=True , a=2 , ) -> List[Any]:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_sizes
snake_case_ = patch_stride
snake_case_ = patch_padding
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = num_labels
snake_case_ = num_channels
snake_case_ = embed_dim
snake_case_ = num_heads
snake_case_ = stride_kv
snake_case_ = depth
snake_case_ = cls_token
snake_case_ = attention_drop_rate
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
def _UpperCamelCase ( self ) -> Dict:
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
# create a random int32 tensor of given shape
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ) -> Dict:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self , a , a , a ) -> int:
snake_case_ = TFCvtModel(config=a )
snake_case_ = model(a , training=a )
snake_case_ = (self.image_size, self.image_size)
snake_case_ , snake_case_ = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
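            # e.g. image_size=64, padding=2, kernel=7, stride=4:
            # floor((64 + 2*2 - 7) / 4 + 1) = floor(16.25) = 16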
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _UpperCamelCase ( self , a , a , a ) -> Dict:
snake_case_ = self.num_labels
snake_case_ = TFCvtForImageClassification(a )
snake_case_ = model(a , labels=a , training=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ) -> Tuple:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ = TFCvtModelTester(self )
snake_case_ = TFCvtConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def _UpperCamelCase ( self ) -> Optional[int]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='Cvt does not output attentions' )
def _UpperCamelCase ( self ) -> Dict:
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def _UpperCamelCase ( self ) -> List[str]:
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def _UpperCamelCase ( self ) -> Optional[int]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
def _UpperCamelCase ( self ) -> Dict:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def _UpperCamelCase ( self ) -> Dict:
super().test_keras_fit()
@unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = tf.keras.mixed_precision.Policy('mixed_float16' )
tf.keras.mixed_precision.set_global_policy(a )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('float32' )
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(a )
snake_case_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCamelCase ( self ) -> Optional[Any]:
def check_hidden_states_output(a , a , a ):
snake_case_ = model_class(a )
snake_case_ = model(**self._prepare_for_class(a , a ) )
snake_case_ = outputs.hidden_states
snake_case_ = len(self.model_tester.depth )
self.assertEqual(len(a ) , a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(a , a , a )
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def _UpperCamelCase ( self ) -> int:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = TFCvtModel.from_pretrained(a )
self.assertIsNotNone(a )
def __UpperCAmelCase ( ):
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCamelCase ( self ) -> Any:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _UpperCamelCase ( self ) -> Dict:
snake_case_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=a , return_tensors='tf' )
# forward pass
snake_case_ = model(**a )
# verify the logits
snake_case_ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , a )
snake_case_ = tf.constant([0.9_285, 0.9_015, -0.3_150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , a , atol=1E-4 ) )
| 198
| 0
|
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a =logging.get_logger(__name__)
a ={"""vocab_file""": """spiece.model"""}
a ={
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
a ={
"""google/bigbird-roberta-base""": 4096,
"""google/bigbird-roberta-large""": 4096,
"""google/bigbird-base-trivia-itc""": 4096,
}
class A_ ( __UpperCAmelCase ):
_UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Union[str, Any] = ['''input_ids''', '''attention_mask''']
_UpperCAmelCase : Optional[int] = []
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<unk>" ,SCREAMING_SNAKE_CASE__ : int="<s>" ,SCREAMING_SNAKE_CASE__ : Dict="</s>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<pad>" ,SCREAMING_SNAKE_CASE__ : Any="[SEP]" ,SCREAMING_SNAKE_CASE__ : Optional[Any]="[MASK]" ,SCREAMING_SNAKE_CASE__ : List[Any]="[CLS]" ,SCREAMING_SNAKE_CASE__ : Dict = None ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ,):
__lowerCamelCase : Tuple = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else bos_token
__lowerCamelCase : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else eos_token
__lowerCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else unk_token
__lowerCamelCase : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else pad_token
__lowerCamelCase : Dict = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else cls_token
__lowerCamelCase : Dict = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase : str = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else mask_token
__lowerCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Optional[int] = vocab_file
__lowerCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(SCREAMING_SNAKE_CASE__)
@property
def lowerCAmelCase ( self : List[str]):
return self.sp_model.get_piece_size()
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Tuple = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Optional[Any]):
__lowerCamelCase : Any = self.__dict__.copy()
__lowerCamelCase : Any = None
return state
def __setstate__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : int = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs'):
__lowerCamelCase : Tuple = {}
__lowerCamelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str):
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]):
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Dict):
__lowerCamelCase : Tuple = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__)
return token
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : Any = []
__lowerCamelCase : Any = ''
__lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__) + token
__lowerCamelCase : List[Any] = True
__lowerCamelCase : List[str] = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__)
return out_string.strip()
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Tuple = False ,SCREAMING_SNAKE_CASE__ : Tuple = None ,SCREAMING_SNAKE_CASE__ : Optional[Any] = True ,**SCREAMING_SNAKE_CASE__ : Tuple ,):
__lowerCamelCase : Optional[Any] = kwargs.pop('use_source_tokenizer' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__lowerCamelCase : Tuple = []
__lowerCamelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE__))
__lowerCamelCase : Union[str, Any] = []
sub_texts.append(SCREAMING_SNAKE_CASE__)
else:
current_sub_text.append(SCREAMING_SNAKE_CASE__)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE__))
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
__lowerCamelCase : Tuple = re.sub(R' (\[(MASK|SEP)\])' ,R'\1' ,' '.join(SCREAMING_SNAKE_CASE__))
else:
__lowerCamelCase : Optional[Any] = ''.join(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__lowerCamelCase : Union[str, Any] = self.clean_up_tokenization(SCREAMING_SNAKE_CASE__)
return clean_text
else:
return text
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] = None):
if not os.path.isdir(SCREAMING_SNAKE_CASE__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__lowerCamelCase : List[str] = os.path.join(
SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__)
elif not os.path.isfile(self.vocab_file):
with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi:
__lowerCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__)
return (out_vocab_file,)
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase : List[Any] = [self.cls_token_id]
__lowerCamelCase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[Any] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__)
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Dict = None):
__lowerCamelCase : Union[str, Any] = [self.sep_token_id]
__lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
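# --- Illustrative layout (a sketch; upstream these three methods are named
# build_inputs_with_special_tokens, get_special_tokens_mask and
# create_token_type_ids_from_sequences) ---
# single sequence A:     [CLS] A [SEP]          mask: [1, 0..0, 1]            type ids: all 0
# sequence pair (A, B):  [CLS] A [SEP] B [SEP]  mask: [1, 0..0, 1, 0..0, 1]   type ids: 0s through
#                                                                             the first [SEP], then 1s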
| 717
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
a =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
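# For example (illustrative key only), rename_backbone_keys maps
# "backbone.0.body.conv1.weight" -> "backbone.conv_encoder.model.conv1.weight".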
def read_in_q_k_v(state_dict):
    prefix = ''
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[F"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[F"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[F"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[F"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[F"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[F"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[F"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[F"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[F"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[F"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
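# Sanity note on the slicing above: PyTorch's nn.MultiheadAttention packs the
# query/key/value projections into a single in_proj_weight of shape
# (3 * hidden_size, hidden_size). With hidden_size = 256, rows [0:256] hold the
# query projection, [256:512] the key projection and [-256:] the value projection.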
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if 'detection' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info('Converting model...')
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: 'table', 1: 'table rotated'}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: 'table',
            1: 'table column',
            2: 'table row',
            3: 'table column header',
            4: 'table projected row header',
            5: 'table spanning cell',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1000 )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
    file_path = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=filename )
    image = Image.open(file_path).convert('RGB')
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1e-4 )
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info('Pushing model to the hub...')
        model_name = (
            'microsoft/table-transformer-detection'
            if 'detection' in checkpoint_url
            else 'microsoft/table-transformer-structure-recognition'
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
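# Example invocation (hypothetical script and output paths; the checkpoint URL is
# one of the two choices declared above):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection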
| 337
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('Building PyTorch model from configuration: {}'.format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path))
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
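# Example invocation (hypothetical local paths):
#   python convert_rembert_checkpoint.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert-pytorch/pytorch_model.bin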
| 495
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data
    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator, dataset_size, batch_size, iterable=False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(accelerator, dataset_size, batch_size, process_0_expected_batch_sizes, process_1_expected_batch_sizes, ):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1 , 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
        assert issubclass(w[-1].category , UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1 , 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1)
    valid_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1 , 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True)
    batch_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("""ignore""" )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1 , 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=False):
            pass
        assert issubclass(w[-1].category , UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()
    accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
    test_default_ensures_even_batch_sizes()
    accelerator.print("""Run tests with even_batches disabled""" )
    test_can_disable_even_batches()
    accelerator.print("""Test joining uneven inputs""" )
    test_can_join_uneven_inputs()
    accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
    test_join_can_override_even_batches()
    accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("""Test join with non DDP distributed raises warning""" )
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
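# The script asserts accelerator.num_processes == 2, so it is meant to run under a
# two-process launch, e.g. (hypothetical file name):
#   accelerate launch --num_processes 2 test_even_batches.py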
| 483
| 0
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = """\n""".join(sorted(configs_without_checkpoint))
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
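# An example of a docstring fragment the `_re_checkpoint` regex accepts (illustrative):
#   [bert-base-uncased](https://huggingface.co/bert-base-uncased)
# Here the captured name "bert-base-uncased" matches the link suffix, so the config
# class would count as having a valid checkpoint.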
| 334
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
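# With the lazy module installed in sys.modules, the tokenizer import above is only
# executed on first attribute access, e.g. (sketch):
#   from transformers.models.mluke import MLukeTokenizer  # triggers the deferred import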
| 334
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 288
|
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    k = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
    o = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
    q = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
    v = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    if split_mlp_wi:
        wi_a = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
        wi_a_a = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
        wi = (wi_a, wi_a_a)
    else:
        wi = params[F'''{prefix}/layers_{i}/mlp/wi/kernel''']
    wo = params[F'''{prefix}/layers_{i}/mlp/wo/kernel''']
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    return params[F'''{prefix}/layers_{i}/{layer_name}/scale''']
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    old = traverse_util.flatten_dict(variables["""target"""] )
    old = {"""/""".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = """encoder/layers_0/mlp/wi_0/kernel""" in old
    print("""Split MLP:""", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["""shared.weight"""] = old["""token_embedder/embedding"""]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, """encoder""", """pre_attention_layer_norm""" )
        k, o, q, v = tax_attention_lookup(old, i, """encoder""", """attention""" )
        new[F'''encoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
        new[F'''encoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
        new[F'''encoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
        new[F'''encoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
        new[F'''encoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, """encoder""", """pre_mlp_layer_norm""" )
        wi, wo = tax_mlp_lookup(old, i, """encoder""", split_mlp_wi )
        new[F'''encoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
        if split_mlp_wi:
            new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'''] = wi[0].T
            new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'''] = wi[1].T
        else:
            new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi.weight'''] = wi.T
        new[F'''encoder.block.{i}.layer.1.DenseReluDense.wo.weight'''] = wo.T
    new["""encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"""] = old[
        """encoder/relpos_bias/rel_embedding"""
    ].T
    new["""encoder.final_layer_norm.weight"""] = old["""encoder/encoder_norm/scale"""]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, """decoder""", """pre_self_attention_layer_norm""" )
            k, o, q, v = tax_attention_lookup(old, i, """decoder""", """self_attention""" )
            new[F'''decoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
            new[F'''decoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
            new[F'''decoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
            new[F'''decoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
            new[F'''decoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, """decoder""", """pre_cross_attention_layer_norm""" )
            k, o, q, v = tax_attention_lookup(old, i, """decoder""", """encoder_decoder_attention""" )
            new[F'''decoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.k.weight'''] = k.T
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.o.weight'''] = o.T
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.q.weight'''] = q.T
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.v.weight'''] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, """decoder""", """pre_mlp_layer_norm""" )
            wi, wo = tax_mlp_lookup(old, i, """decoder""", split_mlp_wi )
            new[F'''decoder.block.{i}.layer.2.layer_norm.weight'''] = layer_norm
            if split_mlp_wi:
                new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'''] = wi[0].T
                new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'''] = wi[1].T
            else:
                new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi.weight'''] = wi.T
            new[F'''decoder.block.{i}.layer.2.DenseReluDense.wo.weight'''] = wo.T
        new["""decoder.final_layer_norm.weight"""] = old["""decoder/decoder_norm/scale"""]
        new["""decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"""] = old[
            """decoder/relpos_bias/rel_embedding"""
        ].T
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["""lm_head.weight"""] = old["""decoder/logits_dense/kernel"""].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["""encoder.embed_tokens.weight"""] = state_dict["""shared.weight"""]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["""decoder.embed_tokens.weight"""] = state_dict["""shared.weight"""]
        if "lm_head.weight" not in state_dict: # For old 1.0 models.
            print("""Using shared word embeddings as lm_head.""" )
            state_dict["""lm_head.weight"""] = state_dict["""shared.weight"""]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    config = T5Config.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("""Done""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
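# Example invocation (hypothetical local paths):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path ./t5x_checkpoint \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5-pytorch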
| 288
| 1
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(model_type, generator_name_or_path, question_encoder_name_or_path, dest_dir, config_name_or_path=None, generator_tokenizer_name_or_path=None, question_encoder_tokenizer_name_or_path=None, ):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
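# Example invocation (the model identifiers and output path below are illustrative):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --dest ./rag-checkpoint \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base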
| 38
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer)
        self.current_processor = self.image_processor
    def __call__( self , images=None , text=None , add_special_tokens=True , padding=False , truncation=None , max_length=None , stride=0 , pad_to_multiple_of=None , return_attention_mask=None , return_overflowing_tokens=False , return_special_tokens_mask=False , return_offsets_mapping=False , return_token_type_ids=False , return_length=False , verbose=True , return_tensors=None , **kwargs , ):
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs)
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
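# A minimal usage sketch (assuming `image_processor` and `tokenizer` are instances
# compatible with the declared processor classes, and `pil_image` is a PIL image):
#   processor = A__(image_processor, tokenizer)
#   inputs = processor(images=pil_image, text="a photo of", return_tensors="pt")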
| 38
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __lowerCamelCase ( PretrainedConfig ):
    model_type = 'decision_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4096 , action_tanh=True , vocab_size=1 , n_positions=1024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
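# A minimal instantiation sketch (values mirror the defaults above):
#   config = __lowerCamelCase(state_dim=17, act_dim=4, hidden_size=128)
#   assert config.max_position_embeddings == config.n_positions  # resolved via attribute_map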
| 29
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize=True , size=None , crop_pct=None , resample=PILImageResampling.BILINEAR , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=None , image_std=None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 384}
        size = get_size_dict(size , default_to_square=False )
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , crop_pct , resample=PILImageResampling.BICUBIC , data_format=None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
        shortest_edge = size["""shortest_edge"""]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format=None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format=None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize=None , size=None , crop_pct=None , resample=None , do_rescale=None , rescale_factor=None , do_normalize=None , image_mean=None , image_std=None , return_tensors=None , data_format=ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("""crop_pct must be specified if size < 384.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
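# A minimal usage sketch (assuming `pil_image` is a PIL image):
#   processor = lowerCAmelCase__(size={"shortest_edge": 384})
#   batch = processor.preprocess(pil_image, return_tensors="np")
#   print(batch["pixel_values"][0].shape)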
| 612
| 0
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'''do_clean_text''': False, '''add_prefix_space''': False}
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        with open(self.emoji_file , "w" ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , expected_tokens )
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(input_ids , expected_ids )
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens )
        self.assertListEqual(input_ids , expected_ids )
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        ids = tokenizer.encode(input_text )
        output_text = tokenizer.decode(ids )
        self.assertEqual(output_text , expected_text )
@slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        ids_a = tokenizer.encode(prefix_text + input_text )
        ids_b = tokenizer.encode("" , prefix_text=prefix_text + input_text )
        ids_c = tokenizer.encode(input_text , prefix_text=prefix_text )
        text_a = tokenizer.decode(ids_a )
        text_b = tokenizer.decode(ids_b )
        text_c = tokenizer.decode(ids_c )
        self.assertEqual(text_a , expected_text )
        self.assertEqual(text_b , expected_text )
        self.assertEqual(text_c , expected_text )
@slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text ) ) - 2
        len_text = len(tokenizer.encode(input_text ) ) - 2
        expected_mask_a = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_b = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_c = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_a = tokenizer(prefix_text + input_text ).token_type_ids
        type_ids_b = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
        type_ids_c = tokenizer(input_text , prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(type_ids_a , expected_mask_a )
        self.assertListEqual(type_ids_b , expected_mask_b )
        self.assertListEqual(type_ids_c , expected_mask_c )
@slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        x_token_a = tokenizer.encode("あンいワ" )
        x_token_b = tokenizer.encode("" , prefix_text="あンいワ" )
        x_token_c = tokenizer.encode("いワ" , prefix_text="あン" )
        self.assertEqual(tokenizer.decode(x_token_a ) , tokenizer.decode(x_token_b ) )
        self.assertEqual(tokenizer.decode(x_token_a ) , tokenizer.decode(x_token_c ) )
        self.assertNotEqual(x_token_a , x_token_b )
        self.assertNotEqual(x_token_a , x_token_c )
        self.assertEqual(x_token_b[1] , x_token_b[-1] )  # SEG token
        self.assertEqual(x_token_c[1] , x_token_c[3] )  # SEG token
@slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs , padding=True )
        x_token_a = tokenizer.batch_encode_plus(input_pairs , padding=True )
        # fmt: off
        expected_outputs = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , expected_outputs )
        self.assertListEqual(x_token.token_type_ids , expected_typeids )
        self.assertListEqual(x_token.attention_mask , expected_attmask )
        self.assertListEqual(x_token_a.input_ids , expected_outputs )
        self.assertListEqual(x_token_a.token_type_ids , expected_typeids )
        self.assertListEqual(x_token_a.attention_mask , expected_attmask )
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass
    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 700
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx ):
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention( idx ,cnt ):
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
return token
def final():
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 10_00
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = CvtConfig(num_labels=num_labels, id2label=idalabel, label2id=labelaid)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [1_92, 7_68, 10_24]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
    help='''Path to the original CvT checkpoint file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCamelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
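# A hedged invocation sketch (not part of the original script); the script and
# checkpoint file names below are illustrative assumptions:
#
#   python convert_cvt_checkpoint.py \
#       --cvt_model cvt-13 \
#       --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-13-384x384-IN-1k.pth \
#       --pytorch_dump_folder_path ./cvt-13-384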
| 463
| 0
|
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase_ ( DonutImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 6
|
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( SchedulerCommonTest ):
    scheduler_classes = (DDPMParallelScheduler,)
    def _snake_case ( self , **kwargs ) -> int:
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config
    def _snake_case ( self ) -> List[Any]:
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def _snake_case ( self ) -> List[Any]:
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def _snake_case ( self ) -> Any:
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def _snake_case ( self ) -> Optional[Any]:
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )
    def _snake_case ( self ) -> Optional[int]:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def _snake_case ( self ) -> List[str]:
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def _snake_case ( self ) -> int:
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def _snake_case ( self ) -> Dict:
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t )
def _snake_case ( self ) -> Union[str, Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def _snake_case ( self ) -> Tuple:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample_a = self.dummy_sample_deter
        sample_b = self.dummy_sample_deter + 0.1
        sample_c = self.dummy_sample_deter - 0.1
        per_sample_batch = sample_a.shape[0]
        samples = torch.stack([sample_a, sample_b, sample_c] , dim=0 )
        timesteps = torch.arange(num_trained_timesteps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
assert abs(result_sum.item() - 1153.1833 ) < 1E-2
assert abs(result_mean.item() - 0.5005 ) < 1E-3
def _snake_case ( self ) -> Dict:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def _snake_case ( self ) -> Dict:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
def _snake_case ( self ) -> Any:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg="`custom_timesteps` must be in descending order." ):
            scheduler.set_timesteps(timesteps=timesteps )
def _snake_case ( self ) -> Union[str, Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def _snake_case ( self ) -> Optional[int]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
            scheduler.set_timesteps(timesteps=timesteps )
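# A small standalone sketch (not part of the test class) of the custom-timesteps
# API exercised above; it assumes `diffusers` provides `DDPMParallelScheduler`
# as imported at the top of this file.
if __name__ == "__main__":
    demo_scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
    demo_scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be strictly descending
    print(demo_scheduler.timesteps)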
| 18
| 0
|
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
UpperCamelCase__ = parser.parse_args()
if args.model_type == "roberta":
UpperCamelCase__ = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase__ = """roberta"""
elif args.model_type == "gpt2":
UpperCamelCase__ = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCamelCase__ = """transformer"""
UpperCamelCase__ = model.state_dict()
UpperCamelCase__ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCamelCase__ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCamelCase__ = f'''{prefix}.embeddings.{w}.weight'''
UpperCamelCase__ = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCamelCase__ = f'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCamelCase__ = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f'''{prefix}.h.{std_idx}.{layer}.{w}'''] = state_dict[
                        f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
                    ]
            compressed_sd[f'''{prefix}.h.{std_idx}.attn.bias'''] = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f'''{prefix}.encoder.layer.{std_idx}.{layer}.{w}'''] = state_dict[
                        f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
                    ]
        std_idx += 1
    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f'''lm_head.dense.{w}'''] = state_dict[f'''lm_head.dense.{w}''']
                compressed_sd[f'''lm_head.layer_norm.{w}'''] = state_dict[f'''lm_head.layer_norm.{w}''']
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f'''{prefix}.ln_f.{w}'''] = state_dict[f'''{prefix}.ln_f.{w}''']
        compressed_sd["""lm_head.weight"""] = state_dict["""lm_head.weight"""]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
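# A hedged usage sketch (not part of the original script); the script file name
# is an illustrative assumption:
#
#   python extract_for_distillation.py \
#       --model_type roberta \
#       --model_name roberta-large \
#       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth \
#       --vocab_transform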
| 552
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class a__ ( Pipeline ):
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        requires_backends(self , "vision" )
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None ):
        """simple docstring"""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images , **kwargs ):
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def preprocess( self , image ):
        """simple docstring"""
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 552
| 1
|
from manim import *
class _SCREAMING_SNAKE_CASE ( Scene ):
def A_ ( self ):
snake_case__ = Rectangle(height=0.5 , width=0.5 )
snake_case__ = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
snake_case__ = [mem.copy() for i in range(6 )]
snake_case__ = [mem.copy() for i in range(6 )]
snake_case__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
snake_case__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
snake_case__ = VGroup(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
snake_case__ = Text("CPU" , font_size=24 )
snake_case__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase )
snake_case__ = [mem.copy() for i in range(1 )]
snake_case__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
snake_case__ = Text("GPU" , font_size=24 )
snake_case__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
gpu.align_to(lowerCamelCase , lowerCamelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase )
snake_case__ = [mem.copy() for i in range(6 )]
snake_case__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
snake_case__ = Text("Model" , font_size=24 )
snake_case__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase , run_time=1 ) , Create(lowerCamelCase , run_time=1 ) , Create(lowerCamelCase , run_time=1 ) , )
snake_case__ = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
snake_case__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case__ = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase , run_time=2.5 ) , Write(lowerCamelCase ) , Write(lowerCamelCase ) )
self.add(lowerCamelCase )
snake_case__ = []
snake_case__ = []
snake_case__ = []
for i, rect in enumerate(lowerCamelCase ):
snake_case__ = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase , opacity=0.7 )
cpu_target.move_to(lowerCamelCase )
cpu_target.generate_target()
snake_case__ = 0.4_6 / 4
snake_case__ = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCamelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase , buff=0.0 )
cpu_targs.append(lowerCamelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase ) )
second_animations.append(MoveToTarget(lowerCamelCase , run_time=1.5 ) )
self.play(*lowerCamelCase )
self.play(*lowerCamelCase )
self.wait()
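# A hedged rendering note (not part of the scene): with manim-community
# installed, a scene like this is typically rendered from the command line,
# e.g. `manim -pql <this_file>.py _SCREAMING_SNAKE_CASE` (preview, low quality).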
| 276
|
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked = 20 ):
    # By linearity of expectation: E[#colours seen] = NUM_COLOURS * P(a given colour appears).
    total = math.comb(NUM_BALLS , num_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F"""{result:.9f}"""
if __name__ == "__main__":
print(solution(20))
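# A quick Monte Carlo sanity check of the closed-form expectation above (not
# part of the original solution); the trial count and tolerance are loose
# assumptions:
#
#   import random
#   urn = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
#   trials = 100_000
#   estimate = sum(len(set(random.sample(urn, 20))) for _ in range(trials)) / trials
#   assert abs(estimate - float(solution(20))) < 0.02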
| 276
| 1
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_UpperCamelCase : str = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav , max_length , sample_rate = 1_6_0_0_0 ):
    '''Randomly sample chunks of `max_length` seconds from the input audio.'''
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
"""simple docstring"""
    dataset_name: Optional[str] = field(default=None , metadata={'''help''': '''Name of a dataset from the datasets package'''})
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''})
    train_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A file containing the training audio paths and labels.'''})
    eval_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A file containing the validation audio paths and labels.'''})
    train_split_name: str = field(
        default='''train''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    eval_split_name: str = field(
        default='''validation''' , metadata={
            '''help''': (
                '''The name of the evaluation data set split to use (via the datasets library). Defaults to \'validation\''''
            )
        } , )
    audio_column_name: str = field(
        default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
    label_column_name: str = field(
        default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''})
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    max_length_seconds: float = field(
        default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class ModelArguments:
"""simple docstring"""
    model_name_or_path: str = field(
        default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''})
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''})
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    feature_extractor_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Name or path of preprocessor config.'''})
    freeze_feature_encoder: bool = field(
        default=True , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''})
    attention_mask: bool = field(
        default=True , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''})
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    freeze_feature_extractor: Optional[bool] = field(
        default=None , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''})
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
    def __post_init__( self )-> None:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
                '''instead. Setting `freeze_feature_encoder==True`.''' , FutureWarning , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , snake_case , snake_case )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__A = training_args.get_process_log_level()
logger.setLevel(snake_case )
transformers.utils.logging.set_verbosity(snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
__A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
__A = DatasetDict()
__A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
__A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
__A = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
__A = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
__A = feature_extractor.model_input_names[0]
def train_transforms(snake_case ):
__A = []
for audio in batch[data_args.audio_column_name]:
__A = random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(snake_case )
__A = feature_extractor(snake_case , sampling_rate=feature_extractor.sampling_rate )
__A = {model_input_name: inputs.get(snake_case )}
__A = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(snake_case ):
__A = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
__A = feature_extractor(snake_case , sampling_rate=feature_extractor.sampling_rate )
__A = {model_input_name: inputs.get(snake_case )}
__A = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__A = raw_datasets['''train'''].features[data_args.label_column_name].names
__A , __A = {}, {}
for i, label in enumerate(snake_case ):
__A = str(snake_case )
__A = label
# Load the accuracy metric from the datasets package
__A = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(snake_case ):
__A = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=snake_case , references=eval_pred.label_ids )
__A = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(snake_case ) , label2id=snake_case , id2label=snake_case , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__A = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
__A = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(snake_case , output_all_columns=snake_case )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__A = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(snake_case , output_all_columns=snake_case )
# Initialize our trainer
__A = Trainer(
model=snake_case , args=snake_case , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=snake_case , tokenizer=snake_case , )
# Training
if training_args.do_train:
__A = None
if training_args.resume_from_checkpoint is not None:
__A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__A = last_checkpoint
__A = trainer.train(resume_from_checkpoint=snake_case )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__A = trainer.evaluate()
trainer.log_metrics('''eval''' , snake_case )
trainer.save_metrics('''eval''' , snake_case )
# Write model card and (optionally) push to hub
__A = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case )
else:
trainer.create_model_card(**snake_case )
if __name__ == "__main__":
main()
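# A hedged example invocation (not part of the script); the dataset/model
# choices below mirror the documented keyword-spotting recipe but are not
# verified here:
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb \
#       --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval \
#       --learning_rate 3e-5 \
#       --max_length_seconds 1 \
#       --num_train_epochs 5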
| 341
|
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class _lowerCAmelCase( unittest.TestCase):
"""simple docstring"""
    def _get_tensors( self , length )-> Any:
        batch_size = 3
        vocab_size = 2_50
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def SCREAMING_SNAKE_CASE__ ( self )-> Optional[int]:
        input_ids , scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def SCREAMING_SNAKE_CASE__ ( self )-> Dict:
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def SCREAMING_SNAKE_CASE__ ( self )-> Union[str, Any]:
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def SCREAMING_SNAKE_CASE__ ( self )-> Optional[Any]:
        input_ids , scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def SCREAMING_SNAKE_CASE__ ( self )-> int:
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
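# A small usage sketch (not from the test file) showing how these criteria plug
# into generation; the model name and wiring are illustrative assumptions:
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   from transformers.generation import MaxLengthCriteria, StoppingCriteriaList
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tokenizer("Hello", return_tensors="pt")
#   out = model.generate(
#       **inputs, stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   )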
| 341
| 1
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_A : Dict = 8
def decimal_to_bits(x , bits=BITS ):
    """Expects an image tensor with values in [0, 1]; returns a bit tensor with values in {-1, 1}."""
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , "d -> d 1 1" )
    x = rearrange(x , "b c h w -> b c 1 h w" )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , "b c d h w -> b (c d) h w" )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x , bits=BITS ):
    """Expects a bit tensor with values in [-1, 1]; returns an image tensor with values in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , "d -> d 1 1" )
    x = rearrange(x , "b (c d) h w -> b c d h w" , d=8 )
    dec = reduce(x * mask , "b c d h w -> b c h w" , "sum" )
    return (dec / 255).clamp(0.0 , 1.0 )
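# A quick round-trip sanity check (not part of the original pipeline file):
# quantising to 8 bits and back should reproduce the input up to 1/255.
#
#   sample = torch.rand(1, 3, 8, 8)
#   restored = bits_to_decimal(decimal_to_bits(sample))
#   assert torch.allclose(sample, restored, atol=1 / 255)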
def ddim_bit_scheduler_step(self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , eta: float = 0.0 , use_clipped_model_output: bool = True , generator=None , return_dict: bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail for a full understanding.
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else "cpu"
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def ddpm_bit_scheduler_step(self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , prediction_type: str = "epsilon" , generator=None , return_dict: bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class a__ ( DiffusionPipeline ):
    def __init__( self , unet , scheduler , bit_scale = 1.0 , ):
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self , _a = 256 , _a = 256 , _a = 50 , _a = None , _a = 1 , _a = "pil" , _a = True , **_a , ):
lowercase : Optional[int] = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=__a , )
lowercase : Optional[int] = decimal_to_bits(__a ) * self.bit_scale
lowercase : List[Any] = latents.to(self.device )
self.scheduler.set_timesteps(__a )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
lowercase : Tuple = self.unet(__a , __a ).sample
# compute the previous noisy sample x_t -> x_t-1
lowercase : str = self.scheduler.step(__a , __a , __a ).prev_sample
lowercase : Optional[Any] = bits_to_decimal(__a )
if output_type == "pil":
lowercase : Any = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
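# A hedged call sketch (not part of the module), assuming `pipe` is an instance
# of the pipeline above built from a compatible UNet and a DDIM/DDPM scheduler:
#
#   images = pipe(height=256, width=256, num_inference_steps=50, output_type="pil").images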
| 361
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__lowerCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class A ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 1_0_0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , audio_length_in_s : Optional[float] = None , return_dict : bool = True , ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"""{audio_length_in_s} is too small. Make sure it's bigger than or equal to"""
                f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                ''' process.''' )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
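# A hedged usage sketch (not part of the module); the checkpoint name below is
# an example of a public unconditional audio-diffusion model and is not
# verified here:
#
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audios = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios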
| 262
| 0
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> Any:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
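# A small illustration (not part of the scheduler file) of the cosine schedule
# computed above: betas start tiny and are capped by `max_beta` at the end.
#
#   betas = betas_for_alpha_bar(1000)
#   print(betas[0].item(), betas[-1].item())  # roughly 4e-5 at the start, 0.999 at the end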
class a ( SchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__( self , num_train_timesteps = 1000 , beta_start = 0.0_00_85 , beta_end = 0.0_12 , beta_schedule = "linear" , trained_betas = None , prediction_type = "epsilon" , use_karras_sigmas = False , clip_sample = False , clip_sample_range = 1.0 , timestep_spacing = "linspace" , steps_offset = 0 , ):
        '''simple docstring'''
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type="cosine" )
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type="exp" )
        else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
        self.use_karras_sigmas = use_karras_sigmas
def _UpperCAmelCase ( self , A_ , A_=None ):
'''simple docstring'''
if schedule_timesteps is None:
_UpperCAmelCase : Dict = self.timesteps
_UpperCAmelCase : Tuple = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_UpperCAmelCase : Optional[Any] = 1 if len(A_ ) > 1 else 0
else:
_UpperCAmelCase : List[Any] = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
_UpperCAmelCase : int = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _UpperCAmelCase ( self , A_ , A_ , ):
'''simple docstring'''
_UpperCAmelCase : Dict = self.index_for_timestep(A_ )
_UpperCAmelCase : str = self.sigmas[step_index]
_UpperCAmelCase : Tuple = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _UpperCAmelCase ( self , A_ , A_ = None , A_ = None , ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = num_inference_steps
_UpperCAmelCase : Any = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_UpperCAmelCase : List[Any] = np.linspace(0 , num_train_timesteps - 1 , A_ , dtype=A_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_UpperCAmelCase : int = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCAmelCase : Any = (np.arange(0 , A_ ) * step_ratio).round()[::-1].copy().astype(A_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_UpperCAmelCase : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCAmelCase : Any = (np.arange(A_ , 0 , -step_ratio )).round().copy().astype(A_ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
_UpperCAmelCase : Optional[int] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_UpperCAmelCase : Optional[Any] = np.log(A_ )
_UpperCAmelCase : Dict = np.interp(A_ , np.arange(0 , len(A_ ) ) , A_ )
if self.config.use_karras_sigmas:
_UpperCAmelCase : Union[str, Any] = self._convert_to_karras(in_sigmas=A_ , num_inference_steps=self.num_inference_steps )
_UpperCAmelCase : List[str] = np.array([self._sigma_to_t(A_ , A_ ) for sigma in sigmas] )
_UpperCAmelCase : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_UpperCAmelCase : Dict = torch.from_numpy(A_ ).to(device=A_ )
_UpperCAmelCase : str = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_UpperCAmelCase : Tuple = torch.from_numpy(A_ )
_UpperCAmelCase : int = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(A_ ).startswith("mps" ):
# mps does not support float64
_UpperCAmelCase : int = timesteps.to(A_ , dtype=torch.floataa )
else:
_UpperCAmelCase : str = timesteps.to(device=A_ )
# empty dt and derivative
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[int] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_UpperCAmelCase : str = defaultdict(A_ )
def _UpperCAmelCase ( self , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : Dict = np.log(A_ )
# get distribution
_UpperCAmelCase : int = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_UpperCAmelCase : Union[str, Any] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_UpperCAmelCase : Union[str, Any] = low_idx + 1
_UpperCAmelCase : Union[str, Any] = log_sigmas[low_idx]
_UpperCAmelCase : Union[str, Any] = log_sigmas[high_idx]
# interpolate sigmas
_UpperCAmelCase : Dict = (low - log_sigma) / (low - high)
_UpperCAmelCase : List[str] = np.clip(A_ , 0 , 1 )
# transform interpolation to time range
_UpperCAmelCase : Dict = (1 - w) * low_idx + w * high_idx
_UpperCAmelCase : Dict = t.reshape(sigma.shape )
return t
def _UpperCAmelCase ( self , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : float = in_sigmas[-1].item()
_UpperCAmelCase : float = in_sigmas[0].item()
_UpperCAmelCase : str = 7.0 # 7.0 is the value used in the paper
_UpperCAmelCase : Union[str, Any] = np.linspace(0 , 1 , A_ )
_UpperCAmelCase : Optional[Any] = sigma_min ** (1 / rho)
_UpperCAmelCase : Union[str, Any] = sigma_max ** (1 / rho)
_UpperCAmelCase : Dict = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return self.dt is None
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ = True , ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.index_for_timestep(A_ )
# advance index counter by 1
_UpperCAmelCase : Tuple = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_UpperCAmelCase : Optional[int] = self.sigmas[step_index]
_UpperCAmelCase : List[Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_UpperCAmelCase : Dict = self.sigmas[step_index - 1]
_UpperCAmelCase : Optional[int] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_UpperCAmelCase : str = 0
_UpperCAmelCase : Any = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_UpperCAmelCase : int = sigma_hat if self.state_in_first_order else sigma_next
_UpperCAmelCase : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_UpperCAmelCase : Any = sigma_hat if self.state_in_first_order else sigma_next
_UpperCAmelCase : Optional[int] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_UpperCAmelCase : Union[str, Any] = model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
_UpperCAmelCase : Tuple = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_UpperCAmelCase : Union[str, Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_UpperCAmelCase : Optional[Any] = sigma_next - sigma_hat
# store for 2nd order step
_UpperCAmelCase : Optional[Any] = derivative
_UpperCAmelCase : int = dt
_UpperCAmelCase : List[Any] = sample
else:
# 2. 2nd order / Heun's method
_UpperCAmelCase : str = (sample - pred_original_sample) / sigma_next
_UpperCAmelCase : Any = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_UpperCAmelCase : Union[str, Any] = self.dt
_UpperCAmelCase : Optional[int] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = None
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A_ )
def _UpperCAmelCase ( self , A_ , A_ , A_ , ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(A_ ):
# mps does not support float64
_UpperCAmelCase : List[str] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_UpperCAmelCase : List[Any] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_UpperCAmelCase : Dict = self.timesteps.to(original_samples.device )
_UpperCAmelCase : Optional[int] = timesteps.to(original_samples.device )
_UpperCAmelCase : List[Any] = [self.index_for_timestep(A_ , A_ ) for t in timesteps]
_UpperCAmelCase : Dict = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_UpperCAmelCase : Dict = sigma.unsqueeze(-1 )
_UpperCAmelCase : Union[str, Any] = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
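
# A minimal sampling sketch for the scheduler above (assuming `unet` and an initial
# latent `sample` are provided by the caller; neither is defined in this module).
# Heun's method is a two-stage predictor-corrector solver, so the sigma/timestep
# arrays built in `set_timesteps` duplicate interior entries and `step` alternates
# between its first- and second-order branches on consecutive calls:
#
#     scheduler.set_timesteps(num_inference_steps=25, device="cuda")
#     sample = torch.randn(shape, device="cuda") * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t).sample
#         sample = scheduler.step(noise_pred, t, sample).prev_sample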
| 467
|
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
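
# Migration sketch: the shim above only emits a FutureWarning and forwards to
# PerceiverImageProcessor, so new code can construct the processor directly:
#
#     from transformers import PerceiverImageProcessor
#
#     image_processor = PerceiverImageProcessor()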
| 467
| 1
|
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
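
# Usage sketch for the scheduler under test (`model` is a placeholder denoiser,
# not part of this file). IPNDM is a multistep method: it keeps a short history
# of past residuals in `scheduler.ets`, which the tests above seed manually.
#
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         residual = model(sample, t)
#         sample = scheduler.step(residual, t, sample).prev_sample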
| 461
|
"""
Element-wise sigmoid and swish (sigmoid linear unit) activation functions.
"""
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the logistic sigmoid 1 / (1 + e^-x), applied element-wise."""
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    """Return the swish activation x * sigmoid(x), applied element-wise."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
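
    # Hand-checked example values: sigmoid(0) = 0.5, so swish(0) = 0 * 0.5 = 0;
    # for large x, sigmoid(x) -> 1 and swish(x) approaches x.
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # ~[0.2689, 0.5, 0.7311]
    print(swish(np.array([-1.0, 0.0, 1.0])))    # ~[-0.2689, 0.0, 0.7311]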
| 582
| 0
|
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")
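
# Worked check of the recurrence above: C(3) = C0*C2 + C1*C1 + C2*C0
#                                            = 1*2 + 1*1 + 2*1 = 5,
# and the first six values come out as:
#     >>> catalan_numbers(5)
#     [1, 1, 2, 5, 14, 42]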
| 695
|
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
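
# Usage sketch outside the test harness (downloads the same checkpoint as the
# slow test above): the pipeline is unconditional, so a single call suffices.
#
#     unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#     pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#     image = pipe(num_inference_steps=20, generator=torch.manual_seed(0)).images[0]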
| 695
| 1
|
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig


MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
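
# Construction sketch: with no arguments, `__init__` above falls back to the Swin
# backbone and DETR decoder defaults; explicit sub-configs go through the
# classmethod instead.
#
#     config = MaskFormerConfig()  # Swin backbone + DetrConfig decoder
#     config = MaskFormerConfig.from_backbone_and_decoder_configs(
#         backbone_config=SwinConfig(), decoder_config=DetrConfig()
#     )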
| 45
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__A : Any = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__A : List[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__A : List[str] = False
__A : int = False
def __snake_case ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : int=False ):
'''simple docstring'''
lowercase :Union[str, Any] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
lowercase :Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class __magic_name__ ( __UpperCAmelCase ):
def __init__( self : Any , snake_case__ : Dict , snake_case__ : Dict=1_3 , snake_case__ : Tuple=7 , snake_case__ : Optional[Any]=True , snake_case__ : Union[str, Any]=True , snake_case__ : str=True , snake_case__ : Optional[Any]=True , snake_case__ : Any=9_9 , snake_case__ : Optional[Any]=3_2 , snake_case__ : Optional[Any]=3_2 , snake_case__ : Any=2 , snake_case__ : Optional[int]=4 , snake_case__ : List[Any]=3_7 , snake_case__ : Optional[int]="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : str=0.1 , snake_case__ : List[Any]=5_1_2 , snake_case__ : List[str]=1_6 , snake_case__ : Union[str, Any]=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : Optional[Any]=3 , snake_case__ : Dict=4 , snake_case__ : int=None , ):
'''simple docstring'''
lowercase :Tuple = parent
lowercase :Tuple = batch_size
lowercase :Optional[Any] = seq_length
lowercase :Optional[Any] = is_training
lowercase :Optional[Any] = use_input_mask
lowercase :List[Any] = use_token_type_ids
lowercase :str = use_labels
lowercase :List[str] = vocab_size
lowercase :str = hidden_size
lowercase :Optional[int] = num_hidden_layers
lowercase :Dict = num_attention_heads
lowercase :Any = intermediate_size
lowercase :List[str] = hidden_act
lowercase :Optional[Any] = hidden_dropout_prob
lowercase :List[Any] = attention_probs_dropout_prob
lowercase :List[Any] = max_position_embeddings
lowercase :List[Any] = type_vocab_size
lowercase :Union[str, Any] = type_sequence_label_size
lowercase :Union[str, Any] = initializer_range
lowercase :Any = num_labels
lowercase :int = num_choices
lowercase :Dict = scope
lowercase :Dict = embedding_size
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase :int = None
if self.use_input_mask:
lowercase :int = random_attention_mask([self.batch_size, self.seq_length] )
lowercase :Tuple = None
if self.use_token_type_ids:
lowercase :int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase :Union[str, Any] = None
lowercase :int = None
lowercase :str = None
if self.use_labels:
lowercase :int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase :str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase :Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase :Optional[int] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Tuple ):
'''simple docstring'''
lowercase :Dict = TFMobileBertModel(config=snake_case__ )
lowercase :Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :List[Any] = model(snake_case__ )
lowercase :Optional[int] = [input_ids, input_mask]
lowercase :Optional[int] = model(snake_case__ )
lowercase :Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __snake_case ( self : List[Any] , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : str , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Optional[int] ):
'''simple docstring'''
lowercase :Any = TFMobileBertForMaskedLM(config=snake_case__ )
lowercase :Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :int = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self : Tuple , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple ):
'''simple docstring'''
lowercase :Optional[Any] = TFMobileBertForNextSentencePrediction(config=snake_case__ )
lowercase :Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __snake_case ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
'''simple docstring'''
lowercase :int = TFMobileBertForPreTraining(config=snake_case__ )
lowercase :Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :List[Any] = model(snake_case__ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __snake_case ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any] ):
'''simple docstring'''
lowercase :List[Any] = self.num_labels
lowercase :List[Any] = TFMobileBertForSequenceClassification(config=snake_case__ )
lowercase :Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : Any , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] ):
'''simple docstring'''
lowercase :Tuple = self.num_choices
lowercase :Any = TFMobileBertForMultipleChoice(config=snake_case__ )
lowercase :Any = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowercase :Union[str, Any] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowercase :List[Any] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowercase :Dict = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase :Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self : Any , snake_case__ : str , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Dict ):
'''simple docstring'''
lowercase :List[Any] = self.num_labels
lowercase :List[str] = TFMobileBertForTokenClassification(config=snake_case__ )
lowercase :int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :int = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : str ):
'''simple docstring'''
lowercase :Union[str, Any] = TFMobileBertForQuestionAnswering(config=snake_case__ )
lowercase :List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :str = model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :Dict = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) :Dict = config_and_inputs
lowercase :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :List[Any] = TFMobileBertModelTest.TFMobileBertModelTester(self )
lowercase :List[str] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case__ )
def __snake_case ( self : Any ):
'''simple docstring'''
lowercase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case__ )
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case__ )
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case__ )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case__ )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case__ )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case__ )
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case__ )
@slow
def __snake_case ( self : int ):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
lowercase :List[str] = TFMobileBertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_tf
class __magic_name__ ( unittest.TestCase ):
@slow
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :int = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
lowercase :Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase :List[Any] = model(snake_case__ )[0]
lowercase :Union[str, Any] = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , snake_case__ )
lowercase :Optional[int] = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1e-4 )
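
# Inference sketch mirroring the integration test above: the pretraining head
# returns `prediction_logits` of shape (batch_size, seq_length, vocab_size).
#
#     model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
#     logits = model(tf.constant([[0, 1, 2, 3, 4, 5]])).prediction_logits  # (1, 6, 30522)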
| 677
| 0
|
from __future__ import annotations


class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
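
    # Usage sketch, hand-checked: det([[1, 2], [3, 4]]) = 1*4 - 2*3 = -2, so the
    # matrix is invertable; its adjugate is the transposed cofactor matrix.
    m = Matrix([[1, 2], [3, 4]])
    print(m.order)          # (2, 2)
    print(m.determinant())  # -2
    print(m.adjugate())     # [[4, -2], [-3, 1]]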
| 720
|
"""Convert Bark checkpoint."""
import argparse
import os
from pathlib import Path

import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download

from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
    BarkCoarseConfig,
    BarkConfig,
    BarkFineConfig,
    BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
    BarkCoarseGenerationConfig,
    BarkFineGenerationConfig,
    BarkGenerationConfig,
    BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)


new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")


def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)


def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config

    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model


def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
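
# Invocation sketch (the script name and output paths are placeholders): convert
# each sub-model with the positional arguments parsed above, e.g.
#
#     python convert_suno_to_hf.py text ./bark-text --is_small
#     python convert_suno_to_hf.py coarse ./bark-coarse --is_small
#     python convert_suno_to_hf.py fine ./bark-fine --is_small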
| 41
| 0
|