Dataset schema:

| column | type | range |
|---|---|---|
| code | string | 82-53.2k chars |
| code_codestyle | int64 | 0-721 |
| style_context | string | 91-41.9k chars |
| style_context_codestyle | int64 | 0-699 |
| label | int64 | 0-1 |
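Each record pairs a `code` snippet with a `style_context` snippet and a binary `label`. A minimal loading sketch with the `datasets` library follows; the dataset identifier is a placeholder, not the real repository name:

from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # placeholder id
row = ds[0]
print(len(row["code"]), row["code_codestyle"], row["label"])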
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    Checks that the `complete_*` example scripts contain everything that is found
    in the `by_feature` scripts, section by section.
    """

    def one_complete_example(self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        self.maxDiff = None  # show full diffs on failure
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| code_codestyle: 324 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                    fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                    self.assertEqual(
                        len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                    )
                    for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                        self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| style_context_codestyle: 324 | label: 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_A = logging.get_logger(__name__)
_A = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class lowerCamelCase (_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a = "trajectory_transformer"
a = ["past_key_values"]
a = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[str] , _snake_case : Tuple=100 , _snake_case : List[str]=5 , _snake_case : Optional[int]=1 , _snake_case : Optional[int]=1 , _snake_case : Optional[int]=249 , _snake_case : Optional[Any]=6 , _snake_case : Optional[int]=17 , _snake_case : str=25 , _snake_case : Union[str, Any]=4 , _snake_case : Union[str, Any]=4 , _snake_case : Optional[Any]=128 , _snake_case : Any=0.1 , _snake_case : Any=0.1 , _snake_case : Optional[Any]=0.1 , _snake_case : Tuple=0.0006 , _snake_case : List[Any]=512 , _snake_case : Union[str, Any]=0.02 , _snake_case : str=1e-12 , _snake_case : Tuple=1 , _snake_case : Tuple=True , _snake_case : str=1 , _snake_case : Any=50256 , _snake_case : str=50256 , **_snake_case : Dict , ) -> Tuple:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = action_weight
SCREAMING_SNAKE_CASE__ = reward_weight
SCREAMING_SNAKE_CASE__ = value_weight
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = block_size
SCREAMING_SNAKE_CASE__ = action_dim
SCREAMING_SNAKE_CASE__ = observation_dim
SCREAMING_SNAKE_CASE__ = transition_dim
SCREAMING_SNAKE_CASE__ = learning_rate
SCREAMING_SNAKE_CASE__ = n_layer
SCREAMING_SNAKE_CASE__ = n_head
SCREAMING_SNAKE_CASE__ = n_embd
SCREAMING_SNAKE_CASE__ = embd_pdrop
SCREAMING_SNAKE_CASE__ = attn_pdrop
SCREAMING_SNAKE_CASE__ = resid_pdrop
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = kaiming_initializer_range
SCREAMING_SNAKE_CASE__ = use_cache
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
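# A minimal usage sketch of the config above (illustrative only; the values follow
# from the defaults and the attribute_map, nothing new is assumed):
#
#   config = TrajectoryTransformerConfig()
#   config.hidden_size        # 128, resolved via the "hidden_size": "n_embd" mapping
#   config.num_hidden_layers  # 4, resolved via "num_hidden_layers": "n_layer"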
| code_codestyle: 538 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_A = ['small', 'medium', 'large']
_A = 'lm_head.decoder.weight'
_A = 'lm_head.weight'
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
SCREAMING_SNAKE_CASE__ = torch.load(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = d.pop(__UpperCAmelCase )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
torch.save(__UpperCAmelCase , os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
_A = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_A = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl')
_A = F'./DialoGPT-{MODEL}'
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| style_context_codestyle: 538 | label: 1 |
def pancake_sort(arr):
    """Sort a list with pancake sort and return the sorted list.

    Example:
        >>> pancake_sort([3, 1, 2])
        [1, 2, 3]
    """
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, flipping the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first `cur` elements, flipping the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
| code_codestyle: 20 |
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[int] = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
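# A minimal sketch of a custom criterion built on the base class above; the class
# name and the stop-token logic are illustrative additions, not part of this module:


class StopOnTokenCriteria(StoppingCriteria):  # hypothetical example
    def __init__(self, stop_token_id: int):
        self.stop_token_id = stop_token_id

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # stop once every sequence in the batch ends with the chosen token
        return bool((input_ids[:, -1] == self.stop_token_id).all())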
| style_context_codestyle: 682 | label: 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
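# A minimal usage sketch of the sentinel-token convention above (assumes a standard
# `t5-small` checkpoint is available; the mapping follows from _convert_token_to_id):
#
#   tok = T5Tokenizer.from_pretrained("t5-small")
#   tok.convert_tokens_to_ids("<extra_id_0>")  # == tok.vocab_size - 1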
| code_codestyle: 721 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _lowercase ( unittest.TestCase ):
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> Dict:
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) )
for a, b in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertAlmostEqual(UpperCAmelCase_ , UpperCAmelCase_ , delta=UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> int:
lowerCamelCase : Optional[int] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(UpperCAmelCase_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def _UpperCamelCase ( self ) -> Optional[Any]:
lowerCamelCase : int = None
ops.enable_eager_execution_internal()
lowerCamelCase : List[str] = tf.config.list_physical_devices('CPU' )
if len(UpperCAmelCase_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
lowerCamelCase : Optional[int] = tf.config.list_logical_devices(device_type='CPU' )
lowerCamelCase : Optional[int] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
lowerCamelCase : Union[str, Any] = GradientAccumulator()
lowerCamelCase : Tuple = tf.Variable([4.0, 3.0] )
lowerCamelCase , lowerCamelCase : Any = create_optimizer(5E-5 , 10 , 5 )
lowerCamelCase : Dict = tf.Variable([0.0, 0.0] , trainable=UpperCAmelCase_ )
def accumulate_on_replica(UpperCAmelCase_ ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(UpperCAmelCase_ , UpperCAmelCase_ ):
with strategy.scope():
lowerCamelCase : List[str] = strategy.experimental_local_results(UpperCAmelCase_ )
local_variables[0].assign(UpperCAmelCase_ )
local_variables[1].assign(UpperCAmelCase_ )
strategy.run(UpperCAmelCase_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(UpperCAmelCase_ )
def _check_local_values(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase : str = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , UpperCAmelCase_ , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , UpperCAmelCase_ , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| style_context_codestyle: 133 | label: 0 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| code_codestyle: 57 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> int:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
UpperCamelCase_: Tuple = json.load(UpperCAmelCase__ )
UpperCamelCase_: List[str] = LukeConfig(use_entity_aware_attention=UpperCAmelCase__ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
UpperCamelCase_: Optional[int] = torch.load(UpperCAmelCase__ , map_location='cpu' )['module']
# Load the entity vocab file
UpperCamelCase_: Any = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
UpperCamelCase_: List[str] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
UpperCamelCase_: Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCamelCase_: Any = AddedToken('<ent>' , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ )
UpperCamelCase_: Optional[Any] = AddedToken('<ent2>' , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__ , 'tokenizer_config.json' ) , 'r' ) as f:
UpperCamelCase_: Union[str, Any] = json.load(UpperCAmelCase__ )
UpperCamelCase_: str = 'MLukeTokenizer'
with open(os.path.join(UpperCAmelCase__ , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Optional[Any] = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
# Initialize the embeddings of the special tokens
UpperCamelCase_: Any = tokenizer.convert_tokens_to_ids(['@'] )[0]
UpperCamelCase_: List[str] = tokenizer.convert_tokens_to_ids(['#'] )[0]
UpperCamelCase_: Tuple = state_dict['embeddings.word_embeddings.weight']
UpperCamelCase_: int = word_emb[ent_init_index].unsqueeze(0 )
UpperCamelCase_: Any = word_emb[enta_init_index].unsqueeze(0 )
UpperCamelCase_: str = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
UpperCamelCase_: Union[str, Any] = state_dict[bias_name]
UpperCamelCase_: Tuple = decoder_bias[ent_init_index].unsqueeze(0 )
UpperCamelCase_: Any = decoder_bias[enta_init_index].unsqueeze(0 )
UpperCamelCase_: Optional[Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
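
# A conversion run would look like this (paths are hypothetical, shown only to
# illustrate how the arguments above fit together):
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base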
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
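
# Note: once the lazy module is installed in sys.modules, an import such as
# `from transformers.models.groupvit import GroupViTModel` only loads the heavy
# torch/TF modeling files when the attribute is first accessed; the TYPE_CHECKING
# branch above exists purely for static type checkers and IDEs.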
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
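
# Minimal usage sketch, assuming these filesystem classes are registered with
# fsspec (the `datasets` library does this on import) and a local "data.txt.gz":
#   import fsspec
#   with fsspec.open("gzip://data.txt::file://./data.txt.gz", mode="rb") as f:
#       print(f.read())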
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
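
# Example invocation for a fine-tuned checkpoint (hypothetical paths, shown only
# to illustrate the arguments above):
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --dict_path ./dict.ltr.txt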
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
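
# Example run (flag names come from TensorFlowBenchmarkArguments; treat the
# exact flags as version-dependent and purely illustrative):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128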
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
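
# These tests belong to the `datasets` test suite and rely on fixtures such as
# `gz_file`, `xz_file` and `text_file` defined in its conftest; a typical run
# (path is illustrative): pytest tests/test_file_utils.py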
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs: bool = False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
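
# Typical use goes through a concrete scheduler subclass; sketch with a public
# checkpoint id (illustrative only):
#   from diffusers import DDPMScheduler
#   scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
#   print(scheduler.compatibles)  # other scheduler classes that share this config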
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
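
# The supported replacement named in the warning above:
#   from diffusers import StableDiffusionInpaintPipeline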
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    #  Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,  # <Added version="2.4.0"/>
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
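
# Usage sketch (hypothetical archive path, for illustration only):
#   fmt = Extractor.infer_extractor_format("dump.tar.gz")  # e.g. "gzip"
#   if fmt:
#       Extractor.extract("dump.tar.gz", "/tmp/extracted", extractor_format=fmt)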
'''simple docstring'''
def __get_demo_graph(index):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph):
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
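
# Worked example: in graph index 3 above every edge lies on a cycle, so
# compute_bridges(__get_demo_graph(3)) returns []. In graph index 1 the graph is
# a forest, so every edge is a bridge; e.g. (1, 9) is reported because removing
# it disconnects vertex 9.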
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
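
# Usage sketch (hypothetical local file; `split` may also be left as None):
#   ds = TextDatasetReader("corpus.txt", split=NamedSplit("train")).read()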
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that adds the key handlers to the class
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """
        Finds and returns the selected character if it exists in the handler
        """
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
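
# Usage sketch (illustrative class; accelerate's terminal menu follows this pattern):
#   @register
#   class Menu:
#       @mark("j")
#       def move_down(cls):
#           ...
# KeyHandler then wires Menu.handle_input to dispatch to move_down when "j" is pressed.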
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping to transformers PP-only naming."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
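
# Example invocation (hypothetical paths, shown only to illustrate the flags):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path ./bloom-megatron \
#       --pytorch_dump_folder_path ./bloom-hf \
#       --shard_model --pretraining_tp 4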
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp_position = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp_position

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
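
# Worked example: entering 3 edges "0 1 1", "1 2 2", "0 2 3" builds a weighted
# triangle; prisms_algorithm keeps the two lightest edges and prints [(0, 1), (1, 2)].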
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
snake_case = logging.get_logger(__name__)
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
A__ : Any = ['''input_features''', '''attention_mask''']
def __init__( self : List[Any] , __lowerCamelCase : str=8_0 , __lowerCamelCase : str=1_6_0_0_0 , __lowerCamelCase : List[str]=8_0 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : int=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Tuple=True , **__lowerCamelCase : List[str] , ):
"""simple docstring"""
super().__init__(feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , **__lowerCamelCase )
_snake_case = num_mel_bins
_snake_case = do_ceptral_normalize
_snake_case = normalize_means
_snake_case = normalize_vars
_snake_case = True
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : np.ndarray , ):
"""simple docstring"""
_snake_case = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
_snake_case = torch.from_numpy(__lowerCamelCase ).unsqueeze(0 )
_snake_case = ta_kaldi.fbank(__lowerCamelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : Optional[bool] = True , __lowerCamelCase : Optional[bool] = True , __lowerCamelCase : float = 0.0 , ):
"""simple docstring"""
# make sure we normalize float32 arrays
if normalize_means:
_snake_case = x[:input_length].mean(axis=0 )
_snake_case = np.subtract(__lowerCamelCase , __lowerCamelCase )
if normalize_vars:
_snake_case = x[:input_length].std(axis=0 )
_snake_case = np.divide(__lowerCamelCase , __lowerCamelCase )
if input_length < x.shape[0]:
_snake_case = padding_value
# make sure array is in float32
_snake_case = x.astype(np.floataa )
return x
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[np.ndarray] , __lowerCamelCase : Optional[np.ndarray] = None ):
"""simple docstring"""
_snake_case = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(__lowerCamelCase , __lowerCamelCase , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(__lowerCamelCase , __lowerCamelCase )
]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ):
        """Featurize one or several raw waveforms into padded, normalized fbank features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
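# --- Minimal usage sketch (editor's illustration, not part of the original file).
# Assumes a short random mono waveform at 16 kHz; `input_features` comes out with
# shape (batch, frames, num_mel_bins).
def _demo_feature_extraction():
    extractor = Speech2TextFeatureExtractor(feature_size=80, sampling_rate=16000, num_mel_bins=80)
    waveform = (np.random.randn(16000) * 0.1).astype(np.float32)
    inputs = extractor(waveform, sampling_rate=16000, return_tensors="np")
    print(inputs["input_features"].shape)  # e.g. (1, ~98, 80) for one second of audio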
| 103
|
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits.""")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
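    # Illustrative round-trip (editor's sketch):
    #   base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
    #   base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"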
| 0
| 0
|
import json
import sys
def format_json_to_md(input_json_file: str, output_md_file: str) -> None:
    """Render a benchmark-results JSON file as a collapsible Markdown table."""
    with open(input_json_file, encoding='utf-8') as f:
        results = json.load(f)
    output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split('/')[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = '| metric |'
        lines = '|--------|'
        value = '| new / old (diff) |'
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['new']
            old_val = metric_vals.get('old', None)
            dif_val = metric_vals.get('diff', None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else 'None'
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append('</details>')
    with open(output_md_file, 'w', encoding='utf-8') as f:
        f.writelines('\n'.join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
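    # Illustrative input shape (editor's sketch): the JSON is expected to look like
    #   {"benchmarks/foo": {"latency": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
    # which renders as a "### Benchmark: foo" section with a one-row metric table.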
| 720
|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id: str, path: str, revision: str) -> None:
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 676
| 0
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 120
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """Configuration class for SegFormer models."""
    model_type = "segformer"
    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.',
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for SegFormer."""
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
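# --- Minimal usage sketch (editor's illustration): builds a config with the
# defaults above and inspects the ONNX export metadata.
def _demo_segformer_config():
    config = SegformerConfig()
    onnx_config = SegformerOnnxConfig(config)
    print(config.model_type)                # "segformer"
    print(onnx_config.inputs)               # pixel_values with dynamic batch/size axes
    print(onnx_config.atol_for_validation)  # 0.0001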
| 120
| 1
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703
|
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than 4 characters in the sentence."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
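    # Worked example (editor's note): only words longer than 4 characters flip, so
    # reverse_long_words("Hey wollef sroirraw") -> "Hey fellow warriors".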
| 589
| 0
|
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count n-digit positive integers that are also an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power)
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 263
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('SD: Done: ONNX')
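    # Example invocation (editor's sketch; the script name and paths are placeholders):
    #   python convert_vae_to_onnx.py --model_path ./stable-diffusion-checkpoint \
    #       --output_path ./sd_onnx --opset 14 --fp16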
| 263
| 1
|
import math
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
def main() -> None:
    """Print octal conversions for a few sample values."""
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(6_5 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(2_1_6 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(5_1_2 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
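    # Worked arithmetic (editor's note): for 65, 65 % 8 = 1 and 65 // 8 = 8, then
    # 8 % 8 = 0 and 8 // 8 = 1, then 1 % 8 = 1 -> digits 101, i.e. "0o101".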
| 700
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def __lowerCamelCase ( self : int ) -> Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ :Union[str, Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE__ :Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
SCREAMING_SNAKE_CASE__ :Dict = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE__ :Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE__ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase_ ) )
def __lowerCamelCase ( self : Tuple , **UpperCamelCase_ : Union[str, Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __lowerCamelCase ( self : Union[str, Any] , **UpperCamelCase_ : Union[str, Any] ) -> Any:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __lowerCamelCase ( self : List[str] , UpperCamelCase_ : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :Tuple = 'lower newer'
SCREAMING_SNAKE_CASE__ :Tuple = 'lower newer'
return input_text, output_text
def __lowerCamelCase ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ :Any = 'lower newer'
SCREAMING_SNAKE_CASE__ :Optional[Any] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE__ :Optional[Any] = tokenizer.tokenize(UpperCamelCase_ ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ :List[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
def __lowerCamelCase ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ :Optional[int] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=UpperCamelCase_ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=UpperCamelCase_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __lowerCamelCase ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ :int = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = tokenizer.encode('sequence builders' , add_special_tokens=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Dict = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[int] = tokenizer.encode(
'sequence builders' , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Dict = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __lowerCamelCase ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ :Any = 'Encode this sequence.'
SCREAMING_SNAKE_CASE__ :int = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE__ :str = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE__ :List[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE__ :Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ )} ) # mask token has a left space
SCREAMING_SNAKE_CASE__ :Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[Any] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE__ :Optional[Any] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE__ :Tuple = tokenizer.encode(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = encoded.index(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = tokenizer.encode(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Any = encoded.index(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
def __lowerCamelCase ( self : Dict ) -> List[str]:
pass
def __lowerCamelCase ( self : List[Any] ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE__ :str = tokenizer_r.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[Any] = tokenizer_p.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
SCREAMING_SNAKE_CASE__ :int = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE__ :str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
UpperCamelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def __lowerCamelCase ( self : Dict ) -> List[str]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
SCREAMING_SNAKE_CASE__ :int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE__ :str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , UpperCamelCase_ )
self.assertEqual(post_processor_state['add_prefix_space'] , UpperCamelCase_ )
self.assertEqual(post_processor_state['trim_offsets'] , UpperCamelCase_ )
def __lowerCamelCase ( self : Dict ) -> List[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ :Tuple = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE__ :Any = f'''{text_of_1_token} {text_of_1_token}'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
SCREAMING_SNAKE_CASE__ :Tuple = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
SCREAMING_SNAKE_CASE__ :str = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Tuple = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ), len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
SCREAMING_SNAKE_CASE__ :int = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ), len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
SCREAMING_SNAKE_CASE__ :int = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE__ :int = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ) + 1, 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
SCREAMING_SNAKE_CASE__ :Any = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Dict = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ), 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
SCREAMING_SNAKE_CASE__ :Tuple = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[str] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ), 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
| 320
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 566
|
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Set each object to None and flush the accelerator cache so the memory can be reclaimed."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    """Check whether `exception` is a CUDA OOM, cuDNN-not-supported, or CPU OOM error."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function`, halving the batch size whenever an OOM-like error is raised."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`")
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
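# --- Minimal usage sketch (editor's illustration): the decorated function must take
# `batch_size` as its first argument; an artificial OOM stands in for a real one.
def _demo_find_executable_batch_size():
    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size):
        if batch_size > 32:  # stand-in for a CUDA OOM at large batch sizes
            raise RuntimeError("CUDA out of memory.")
        return batch_size

    print(train())  # retries 128 -> 64 -> 32 and returns 32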
| 566
| 1
|
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """Return True if `a` can be turned into `b` by upper-casing some of its
    lowercase letters and deleting all remaining lowercase letters."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
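    # Worked example (editor's note): abbr("daBcd", "ABC") is True - capitalize
    # "a" and "c", keep "B", and delete the remaining lowercase "d"s.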
| 147
|
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/xprophetnet-large-wiki100-cased""": (
        """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
    ),
}
class XLMProphetNetConfig(PretrainedConfig):
    """Configuration class for XLM-ProphetNet models."""

    model_type = 'xlm-prophetnet'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'num_encoder_attention_heads',
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.')
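# --- Minimal usage sketch (editor's illustration, using the defaults above):
def _demo_xlm_prophetnet_config():
    config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
    print(config.num_hidden_layers)  # 12: the property sums encoder and decoder layers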
| 147
| 1
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """Return all released diffusers versions, sorted oldest to newest."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    """Create the cache directory for dynamic modules and add it to `sys.path`."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    """Create a dynamic-module folder (and its parents) inside the HF modules cache."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Collect the relative imports declared in `module_file`."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """Recursively collect every file that `module_file` depends on through relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    """Check that all top-level imports in `filename` are installed, then return its relative imports."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`")
    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import `module_path` and return the class `class_name` (or the unique pipeline class if None)."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Retrieve the unique pipeline class defined in `loaded_module`, excluding `DiffusionPipeline` itself."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}.")
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Download `module_file` from a repo, URL, or local path and cache it as a dynamic module."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}.")
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Fetch a module from a repo or URL, cache it locally, and return the requested class."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
| 55
|
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved state dict, cast every tensor to fp16, and save it back."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
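    # Example invocation (editor's sketch; the file name is a placeholder). `fire`
    # maps positional and flag arguments onto `convert`'s parameters:
    #   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin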
| 275
| 0
|
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( UpperCamelCase__ :str , UpperCamelCase__ :str , UpperCamelCase__ :str ) -> Optional[int]:
def get_masked_lm_array(UpperCamelCase__ :str ):
snake_case__ : Optional[int] = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
snake_case__ : str = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
snake_case__ : Optional[int] = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_array(UpperCamelCase__ :str ):
snake_case__ : Optional[int] = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
snake_case__ : List[str] = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
snake_case__ : Dict = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_layer_array(UpperCamelCase__ :int , UpperCamelCase__ :str ):
snake_case__ : Optional[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
snake_case__ : Any = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
snake_case__ : Any = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_attention_layer_array(UpperCamelCase__ :int , UpperCamelCase__ :str , UpperCamelCase__ :List[str] ):
snake_case__ : int = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
snake_case__ : Tuple = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
snake_case__ : int = array.reshape(UpperCamelCase__ )
if "kernel" in name:
snake_case__ : List[Any] = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
print(F'''Loading model based on config from {config_path}...''' )
snake_case__ : Optional[int] = BertConfig.from_json_file(UpperCamelCase__ )
snake_case__ : Optional[int] = BertForMaskedLM(UpperCamelCase__ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
snake_case__ : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
snake_case__ : BertSelfAttention = layer.attention.self
snake_case__ : int = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
snake_case__ : Tuple = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
snake_case__ : List[str] = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
snake_case__ : Tuple = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
snake_case__ : List[str] = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
snake_case__ : Union[str, Any] = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
snake_case__ : BertSelfOutput = layer.attention.output
snake_case__ : Tuple = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
snake_case__ : Dict = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
snake_case__ : List[Any] = get_encoder_layer_array(UpperCamelCase__ , '''_attention_layer_norm/gamma''' )
snake_case__ : Union[str, Any] = get_encoder_layer_array(UpperCamelCase__ , '''_attention_layer_norm/beta''' )
# Intermediate
snake_case__ : BertIntermediate = layer.intermediate
snake_case__ : Union[str, Any] = get_encoder_layer_array(UpperCamelCase__ , '''_intermediate_dense/kernel''' )
snake_case__ : Tuple = get_encoder_layer_array(UpperCamelCase__ , '''_intermediate_dense/bias''' )
# Output
snake_case__ : BertOutput = layer.output
snake_case__ : List[str] = get_encoder_layer_array(UpperCamelCase__ , '''_output_dense/kernel''' )
snake_case__ : int = get_encoder_layer_array(UpperCamelCase__ , '''_output_dense/bias''' )
snake_case__ : List[Any] = get_encoder_layer_array(UpperCamelCase__ , '''_output_layer_norm/gamma''' )
snake_case__ : Dict = get_encoder_layer_array(UpperCamelCase__ , '''_output_layer_norm/beta''' )
# Embeddings
snake_case__ : List[str] = get_encoder_array('''_position_embedding_layer/embeddings''' )
snake_case__ : Tuple = get_encoder_array('''_type_embedding_layer/embeddings''' )
snake_case__ : Dict = get_encoder_array('''_embedding_norm_layer/gamma''' )
snake_case__ : str = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
snake_case__ : Union[str, Any] = model.cls.predictions.transform
snake_case__ : Optional[int] = get_masked_lm_array('''dense/kernel''' )
snake_case__ : Dict = get_masked_lm_array('''dense/bias''' )
snake_case__ : List[Any] = get_masked_lm_array('''layer_norm/gamma''' )
snake_case__ : Tuple = get_masked_lm_array('''layer_norm/beta''' )
snake_case__ : Any = get_masked_lm_array('''embedding_table''' )
# Pooling
snake_case__ : int = BertPooler(config=UpperCamelCase__ )
snake_case__ : BertPooler = get_encoder_array('''_pooler_layer/kernel''' )
snake_case__ : BertPooler = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(UpperCamelCase__ )
# Integration test - should load without any errors ;)
snake_case__ : str = BertForMaskedLM.from_pretrained(UpperCamelCase__ )
print(new_model.eval() )
print('''Model conversion was done sucessfully!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
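    # Example invocation (editor's sketch; the script name and paths are placeholders):
    #   python convert_token_dropping_checkpoint.py --tf_checkpoint_path ./tf_ckpt \
    #       --bert_config_file ./bert_config.json --pytorch_dump_path ./pytorch_model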
| 710
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
) -> None:
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
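    # Example invocation (editor's sketch; the script name and paths are placeholders):
    #   python convert_xlnet_checkpoint.py --tf_checkpoint_path ./xlnet_ckpt \
    #       --xlnet_config_file ./config.json --pytorch_dump_folder_path ./out \
    #       --finetuning_task sts-b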
| 574
| 0
|
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def A_( A : Any , A : List[Any]):
UpperCamelCase = int(A)
assert noofclusters < len(A)
# Find out the dimensionality
UpperCamelCase = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION (this uses the TF 1.x graph-mode API;
        # on TF 2.x the same calls live under `tf.compat.v1`)
        sess = tf.Session()
        # CONSTRUCTING THE ELEMENTS OF COMPUTATION
        # First let's ensure we have a Variable vector for each centroid,
        # initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        # These nodes will assign the centroid Variables the appropriate
        # values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        # Variables for cluster assignments of individual vectors (initialized
        # to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        # These nodes will assign an assignment Variable the appropriate
        # value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        # Now let's construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        # Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))
        # This node will figure out which cluster to assign a vector to,
        # based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        # INITIALIZING STATE VARIABLES
        # This will help initialization of all Variables defined with respect
        # to the graph. The Variable-initializer should be defined after
        # all the Variables have been constructed, so that each of them
        # will be included in the initialization.
        init_op = tf.global_variables_initializer()
        # Initialize all variables
        sess.run(init_op)
        # CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            # EXPECTATION STEP
            # Based on the centroid locations till last iteration, compute
            # the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            # MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
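
# Example usage (a minimal sketch): assumes the enclosing function above is
# exposed as `TFKMeansCluster(vectors, noofclusters)` as defined earlier in
# this file, and that `vectors` is a 2-D float array of observations. The
# names here are illustrative, not part of any library API.
#
#     from numpy import array
#     data = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [9.0, 8.5]])
#     centroids, assignments = TFKMeansCluster(data, noofclusters=2)
#     print(centroids)    # final centroid locations
#     print(assignments)  # cluster index chosen for each input vector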
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
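
# Example usage (a minimal sketch; the checkpoint id and local image path are
# illustrative assumptions, not pinned by this file):
#
#     from PIL import Image
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     image = Image.open("cat.png")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # `inputs` now carries `input_ids`, `attention_mask` and `pixel_values`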
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    50.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n    >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    33.3\n\n"
_CITATION = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
'''simple docstring'''
import base64


def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string to base64 bytes."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded: bytes) -> str:
    """Decode base64 bytes back to a UTF-8 string."""
    return base64.b64decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
import random
def partition(a, left_index, right_index):
    """Partition around a[left_index]; return the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
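
# Non-interactive example (a quick sanity check of the functions above):
#
#     data = [5, 2, 9, 1]
#     quick_sort_random(data, 0, len(data))
#     assert data == [1, 2, 5, 9]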
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write an Accelerate config; it will pick up CPU, GPU, or multi-GPU setups
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with fp16 and multi-gpu, unable to fit 7 epochs into one run
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
TGT = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_format():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    # Alternate between the maximizing and minimizing player at each level.
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
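
# Expected output for the sample tree above (leaves [90, 23, 6, 33, 21, 65,
# 123, 34423], maximizer moving first on a tree of height 3):
#     Optimal value : 65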
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )
        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)
        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)
        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()
        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()
        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
"""simple docstring"""
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
UpperCAmelCase__ ="facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
UpperCAmelCase__ ="allenai"
def lowerCAmelCase_ ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__lowercase = dict((re.sub(R"""@@$""" , """""" , UpperCamelCase__ ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , UpperCamelCase__ ), v) for k, v in d.items() )
__lowercase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
__lowercase = d[k] # restore
return da
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )
    args = vars(chkpt["args"]["model"])
    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]
    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")
    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
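    # equivalent one-liner (sketch): do_lower_case = all(k.islower() for k in src_vocab)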
    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
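    # e.g. a fairseq bpecodes line "e r</w> 1234" (pair + merge frequency; the number is
    # hypothetical) becomes the standard merges entry "e r</w>" once the trailing
    # frequency column is stripped above.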
# model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"
    model_conf = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0
    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
# tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }
    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
# model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
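    # e.g. "encoder.embed_tokens.weight" -> "model.encoder.embed_tokens.weight"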
# remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 718
|
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
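# sketch: the model emits logits of shape (batch_size, 7), one column per complexity
# class, so the argmax above yields the predicted class index for each example.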
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
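# note: on_epoch_end re-runs evaluation on the *training* set with metric_key_prefix="train",
# so train-set accuracy gets logged next to the validation metrics; the control object is
# deep-copied first so the extra evaluate() call cannot mutate the trainer's control flow.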
def main():
    args = get_args()
    set_seed(args.seed)
    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
{
"""train""": train_test["""train"""],
"""test""": test_validation["""train"""],
"""valid""": test_validation["""test"""],
} )
print("""Loading tokenizer and model""" )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))
    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation["train"].column_names
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
print("""Training...""" )
    trainer.add_callback(CustomCallback(trainer))
trainer.train()
if __name__ == "__main__":
main()
| 442
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
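# sketch: each tuple is (original_key, hf_key); e.g. "visual_encoder.cls_token" is re-filed
# as "vision_model.embeddings.class_embedding" by rename_key() below, which pops the old
# entry from the state dict and re-inserts its value under the new name.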
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
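# note: the original ViT stores separate q and v biases and no k bias at all; the HF
# checkpoint expects one fused qkv bias, so a zero block standing in for the missing
# k bias is concatenated between q_bias and v_bias above.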
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)
    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})
if "t5" in model_name:
_a : Optional[int] = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
_a : Dict = LlamaTokenizerFast.from_pretrained(
'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' )
tokenizer.add_special_tokens({'pad_token': '[PAD]'} )
    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()
    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    name, type = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print('Done!')
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
# some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
# read in qv biases
    read_in_q_v_bias(state_dict, config)
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)
    image = load_demo_image()
    prompt = "What is unusual about this image?"
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)
    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)
    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
with torch.no_grad():
if "vicuna" in model_name:
_a : Optional[Any] = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits
_a : List[str] = hf_model(**a_ ).logits
else:
_a : Union[str, Any] = original_model(
{'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits
_a : Union[str, Any] = tokenizer('\n' , return_tensors='pt' ).input_ids.to(a_ )
_a : Tuple = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
_a : List[Any] = hf_model(**a_ , labels=a_ ).logits
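            # note: -100 is the ignore_index of PyTorch's cross-entropy loss, so padded label
            # positions are excluded from the (here unused) loss the HF model computes.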
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print('Looks ok!')
    print('Generating with original model...')
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)
    # important: we need to cast the weights of the HF model to the appropriate type
    print('Generating with HF model...')
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print('Original generation:', original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 358
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: vl = item values, wt = item weights, w = capacity, n = number of items."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
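# usage sketch (hypothetical numbers): frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
# sorts by value/weight ratio, takes the first two items whole (value 160) plus 20/30 of
# the third, returning 240.0.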
if __name__ == "__main__":
import doctest
doctest.testmod()
| 498
| 0
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 12
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    do_lower_case: Optional[bool] = field(
        default=None, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"}
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
            )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
        )
        label_list = predict_dataset.features["label"].names
# Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
# Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
| 12
| 1
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator, batch_size=16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
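# usage sketch (assuming an accelerate Accelerator instance):
#   accelerator = Accelerator()
#   train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)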
| 453
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
def __init__( self: List[str] , __UpperCamelCase: Optional[int] , __UpperCamelCase: Optional[int]=13 , __UpperCamelCase: Any=32 , __UpperCamelCase: Dict=2 , __UpperCamelCase: Dict=3 , __UpperCamelCase: Any=640 , __UpperCamelCase: str=4 , __UpperCamelCase: Optional[int]="silu" , __UpperCamelCase: Union[str, Any]=3 , __UpperCamelCase: Optional[int]=32 , __UpperCamelCase: Tuple=0.1 , __UpperCamelCase: List[Any]=0.1 , __UpperCamelCase: Tuple=0.1 , __UpperCamelCase: int=0.0_2 , __UpperCamelCase: Dict=True , __UpperCamelCase: int=True , __UpperCamelCase: Any=10 , __UpperCamelCase: Tuple=None , ) -> Union[str, Any]:
__magic_name__ : int = parent
__magic_name__ : Dict = batch_size
__magic_name__ : Optional[Any] = image_size
__magic_name__ : List[str] = patch_size
__magic_name__ : Tuple = num_channels
__magic_name__ : Optional[Any] = last_hidden_size
__magic_name__ : Any = num_attention_heads
__magic_name__ : Any = hidden_act
__magic_name__ : int = conv_kernel_size
__magic_name__ : int = output_stride
__magic_name__ : Optional[int] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : Optional[int] = classifier_dropout_prob
__magic_name__ : Union[str, Any] = use_labels
__magic_name__ : List[str] = is_training
__magic_name__ : List[Any] = num_labels
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : Optional[int] = scope
def lowerCAmelCase__ ( self: str ) -> Any:
__magic_name__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Dict = None
__magic_name__ : Optional[int] = None
if self.use_labels:
__magic_name__ : str = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__magic_name__ : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase__ ( self: Optional[int] ) -> Any:
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self: Optional[Any] , __UpperCamelCase: Union[str, Any] , __UpperCamelCase: Tuple , __UpperCamelCase: str , __UpperCamelCase: Optional[int] ) -> Dict:
__magic_name__ : Tuple = MobileViTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__magic_name__ : Optional[int] = model(__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase__ ( self: Dict , __UpperCamelCase: Any , __UpperCamelCase: Optional[Any] , __UpperCamelCase: Union[str, Any] , __UpperCamelCase: Any ) -> Any:
__magic_name__ : List[Any] = self.num_labels
__magic_name__ : int = MobileViTForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__magic_name__ : Optional[Any] = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self: Optional[int] , __UpperCamelCase: Union[str, Any] , __UpperCamelCase: List[Any] , __UpperCamelCase: str , __UpperCamelCase: Union[str, Any] ) -> Dict:
__magic_name__ : Any = self.num_labels
__magic_name__ : List[Any] = MobileViTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__magic_name__ : Optional[int] = model(__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__magic_name__ : str = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase__ ( self: int ) -> Union[str, Any]:
__magic_name__ : Any = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] = config_and_inputs
__magic_name__ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__snake_case = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__snake_case = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def lowerCAmelCase__ ( self: Optional[int] ) -> Optional[Any]:
__magic_name__ : Dict = MobileViTModelTester(self )
__magic_name__ : Tuple = MobileViTConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase )
def lowerCAmelCase__ ( self: Dict ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def lowerCAmelCase__ ( self: str ) -> Dict:
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def lowerCAmelCase__ ( self: Any ) -> Any:
pass
def lowerCAmelCase__ ( self: List[Any] ) -> Union[str, Any]:
__magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Any = model_class(__UpperCamelCase )
__magic_name__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Any = [*signature.parameters.keys()]
__magic_name__ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self: Dict ) -> List[Any]:
pass
def lowerCAmelCase__ ( self: Optional[int] ) -> str:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self: int ) -> Dict:
def check_hidden_states_output(__UpperCamelCase: Any , __UpperCamelCase: str , __UpperCamelCase: List[Any] ):
__magic_name__ : Any = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
__magic_name__ : Tuple = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
__magic_name__ : str = outputs.hidden_states
__magic_name__ : Optional[Any] = 5
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__magic_name__ : List[Any] = 2
for i in range(len(__UpperCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
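            # sketch: with image_size=32 the five hidden states have spatial sizes 16, 8, 4, 2
            # and 1, so divisor ends at 64 and output_stride == 64 // 2 == 32, matching the
            # tester configuration.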
__magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Dict = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self: str ) -> List[str]:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def lowerCAmelCase__ ( self: int ) -> int:
__magic_name__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ) -> str:
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Dict = MobileViTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
@cached_property
def lowerCAmelCase__ ( self: Tuple ) -> Optional[Any]:
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
__magic_name__ : Tuple = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(__UpperCamelCase )
__magic_name__ : Any = self.default_image_processor
__magic_name__ : Tuple = prepare_img()
__magic_name__ : str = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__magic_name__ : Dict = model(**__UpperCamelCase )
# verify the logits
__magic_name__ : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
__magic_name__ : Union[str, Any] = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self: Tuple ) -> Optional[int]:
__magic_name__ : Tuple = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__magic_name__ : Union[str, Any] = model.to(__UpperCamelCase )
__magic_name__ : int = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__magic_name__ : Union[str, Any] = prepare_img()
__magic_name__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__magic_name__ : int = model(**__UpperCamelCase )
__magic_name__ : List[Any] = outputs.logits
# verify the logits
__magic_name__ : Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __UpperCamelCase )
__magic_name__ : List[Any] = torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
] , device=__UpperCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self: Tuple ) -> List[Any]:
__magic_name__ : Tuple = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__magic_name__ : Union[str, Any] = model.to(__UpperCamelCase )
__magic_name__ : int = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__magic_name__ : Union[str, Any] = prepare_img()
__magic_name__ : int = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__magic_name__ : Optional[int] = model(**__UpperCamelCase )
__magic_name__ : List[str] = outputs.logits.detach().cpu()
__magic_name__ : str = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase , target_sizes=[(50, 60)] )
__magic_name__ : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __UpperCamelCase )
__magic_name__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase )
__magic_name__ : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __UpperCamelCase )
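
# Hedged sketch of what post_process_semantic_segmentation does above: upsample
# the (batch, num_labels, h, w) logits to the target size, then argmax over the
# label dimension to get one class id per pixel.
import torch

logits = torch.randn(1, 21, 32, 32)
upsampled = torch.nn.functional.interpolate(
    logits, size=(50, 60), mode="bilinear", align_corners=False
)
segmentation = upsampled.argmax(dim=1)[0]
print(segmentation.shape)  # torch.Size([50, 60])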
| 436
| 0
|
from __future__ import annotations
def get_valid_pos(position, n):
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions


def is_complete(board):
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board, pos, curr):
    """Helper function to solve knight tour problem by backtracking."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n):
    """Find the solution for the knight tour problem for a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
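    # Usage sketch: open knight's tours exist on n x n boards for n = 1 and
    # n >= 5, so a 5x5 request succeeds, while n in {2, 3, 4} raises ValueError.
    for row in open_knight_tour(5):
        print(row)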
| 303
|
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp_pos = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp_pos

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
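
# Worked example (no stdin needed): for this weighted triangle, Prim's
# algorithm keeps the two cheapest edges, so the returned MST edge list
# should be [(0, 1), (1, 2)].
_demo = defaultdict(list)
for _u, _v, _w in [(0, 1, 1), (1, 2, 2), (0, 2, 4)]:
    _demo[_u].append([_v, _w])
    _demo[_v].append([_u, _w])
assert prisms_algorithm(_demo) == [(0, 1), (1, 2)]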
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 303
| 1
|
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """
    Differentiate `func` at `position` to the given `order` using dual numbers.
    """
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
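    # Sanity check against the closed form: f(y) = y**6 gives f''(y) = 30 * y**4,
    # so differentiate(f, 9, 2) should equal 30 * 9**4 = 196830.
    assert differentiate(f, 9, 2) == 30 * 9**4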
| 27
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __magic_name__ (unittest.TestCase ):
lowerCamelCase__ = StableDiffusionLDMaDPipeline
lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def __a ( self ) -> List[Any]:
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowerCAmelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
lowerCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ = CLIPTextModel(_a )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __a ( self , _a , _a=0 ) -> Any:
if str(_a ).startswith("mps" ):
lowerCAmelCase_ = torch.manual_seed(_a )
else:
lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a )
lowerCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> List[str]:
lowerCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = StableDiffusionLDMaDPipeline(**_a )
lowerCAmelCase_ = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_dummy_inputs(_a )
lowerCAmelCase_ = ldmad_pipe(**_a )
lowerCAmelCase_ , lowerCAmelCase_ = output.rgb, output.depth
lowerCAmelCase_ = rgb[0, -3:, -3:, -1]
lowerCAmelCase_ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCAmelCase_ = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
lowerCAmelCase_ = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def __a ( self ) -> Tuple:
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = StableDiffusionLDMaDPipeline(**_a )
lowerCAmelCase_ = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_dummy_inputs(_a )
lowerCAmelCase_ = 3 * [inputs["prompt"]]
# forward
lowerCAmelCase_ = ldmad_pipe(**_a )
lowerCAmelCase_ , lowerCAmelCase_ = output.rgb, output.depth
lowerCAmelCase_ = rgb_slice_a[0, -3:, -3:, -1]
lowerCAmelCase_ = depth_slice_a[0, -3:, -1]
lowerCAmelCase_ = self.get_dummy_inputs(_a )
lowerCAmelCase_ = 3 * [inputs.pop("prompt" )]
lowerCAmelCase_ = ldmad_pipe.tokenizer(
_a , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_a , return_tensors="pt" , )
lowerCAmelCase_ = text_inputs["input_ids"].to(_a )
lowerCAmelCase_ = ldmad_pipe.text_encoder(_a )[0]
lowerCAmelCase_ = prompt_embeds
# forward
lowerCAmelCase_ = ldmad_pipe(**_a )
lowerCAmelCase_ , lowerCAmelCase_ = output.rgb, output.depth
lowerCAmelCase_ = rgb_slice_a[0, -3:, -3:, -1]
lowerCAmelCase_ = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=_a )
lowerCAmelCase_ = StableDiffusionLDMaDPipeline(**_a )
lowerCAmelCase_ = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_dummy_inputs(_a )
lowerCAmelCase_ = "french fries"
lowerCAmelCase_ = ldmad_pipe(**_a , negative_prompt=_a )
lowerCAmelCase_ , lowerCAmelCase_ = output.rgb, output.depth
lowerCAmelCase_ = rgb[0, -3:, -3:, -1]
lowerCAmelCase_ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCAmelCase_ = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
lowerCAmelCase_ = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
def __a ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ) -> Dict:
lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a )
lowerCAmelCase_ = np.random.RandomState(_a ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ = torch.from_numpy(_a ).to(device=_a , dtype=_a )
lowerCAmelCase_ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
lowerCAmelCase_ = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_inputs(_a )
lowerCAmelCase_ = ldmad_pipe(**_a )
lowerCAmelCase_ , lowerCAmelCase_ = output.rgb, output.depth
lowerCAmelCase_ = rgb[0, -3:, -3:, -1].flatten()
lowerCAmelCase_ = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
lowerCAmelCase_ = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
lowerCAmelCase_ = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
def __a ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ) -> Union[str, Any]:
lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a )
lowerCAmelCase_ = np.random.RandomState(_a ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ = torch.from_numpy(_a ).to(device=_a , dtype=_a )
lowerCAmelCase_ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> str:
lowerCAmelCase_ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_inputs(_a )
lowerCAmelCase_ = ldmad_pipe(**_a )
lowerCAmelCase_ , lowerCAmelCase_ = output.rgb, output.depth
lowerCAmelCase_ = 0.4_9_5_5_8_6
lowerCAmelCase_ = 0.3_3_7_9_5_5_1_5
lowerCAmelCase_ = 1_1_2.4_8_5_1_8
lowerCAmelCase_ = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def __a ( self ) -> Dict:
lowerCAmelCase_ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_inputs(_a )
lowerCAmelCase_ = ldmad_pipe(**_a )
lowerCAmelCase_ , lowerCAmelCase_ = output.rgb, output.depth
lowerCAmelCase_ = 0.4_1_9_4_1_2_7
lowerCAmelCase_ = 0.3_5_3_7_5_5_8_6
lowerCAmelCase_ = 0.5_6_3_8_5_0_2
lowerCAmelCase_ = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
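
# Hedged usage sketch (assumes a CUDA device and that the Intel/ldm3d weights
# can be downloaded): the pipeline returns an RGB image and a depth map pair.
from diffusers import StableDiffusionLDMaDPipeline

pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to("cuda")
out = pipe(
    "a photograph of an astronaut riding a horse",
    num_inference_steps=50,
    output_type="numpy",
)
rgb, depth = out.rgb, out.depth
print(rgb.shape, depth.shape)  # (1, 512, 512, 3) (1, 512, 512)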
| 122
| 0
|
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word index overflows - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word index overflows - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
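    # Worked example: the classic "intention" -> "execution" case needs 5 edits.
    assert min_distance_up_bottom("intention", "execution") == 5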
| 712
|
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for the insertion point of collection[i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift the sorted prefix right and insert val
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
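
# Quick self-check: binary search cuts the comparisons per insertion to
# O(log i), though element shifting keeps the overall sort O(n^2).
assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]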
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 515
| 0
|
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: returns the longest palindromic substring of
    input_string in O(n) time.
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1

        # does this string end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
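
# Worked examples: the "|" separators make every palindrome odd-length in the
# transformed string, which is what lets one O(n) center-expansion pass work.
assert palindromic_string("abbbaba") == "abbba"
assert palindromic_string("forgeeksskeegfor") == "geeksskeeg"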
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowerCamelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """
    Counts the "triangle words" in words.txt: words whose alphabetical value
    (A=1, ..., Z=26, summed over the letters) is a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
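
# Worked example of the scoring rule: "SKY" -> 19 + 11 + 25 = 55, and
# 55 = 10 * 11 / 2 is the 10th triangular number, so "SKY" is a triangle word.
assert sum(ord(x) - 64 for x in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS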
if __name__ == "__main__":
print(solution())
| 474
| 0
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
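
# Minimal self-contained sketch of the denoising loop these tests exercise,
# assuming `diffusers` and `torchsde` are installed; the zero tensor stands in
# for a trained UNet's noise prediction.
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, beta_schedule="linear", noise_sampler_seed=0)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # a trained model would go here
    sample = scheduler.step(noise_pred, t, sample).prev_sample
print(sample.abs().mean())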
| 527
|
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """
    Releases memory from `objects` by setting them to `None`, then clears the
    device caches. Returned objects should be reassigned to the same variables.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """
    Checks whether `exception` looks like an out-of-memory error worth retrying at a smaller batch size.
    """
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """
    A decorator that retries `function` with a halved batch size whenever it raises an
    out-of-memory-style error, starting from `starting_batch_size`.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
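
# Hypothetical usage sketch: the decorated function must take the batch size as
# its first argument; the decorator halves it after every OOM-style failure.
# The RuntimeError below stands in for a real CUDA OOM from a training step.
@find_executable_batch_size(starting_batch_size=128)
def _train(batch_size):
    if batch_size > 16:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

assert _train() == 16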
| 527
| 1
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__lowerCamelCase = logging.get_logger(__name__)
# General docstring
__lowerCamelCase = """MobileNetV1Config"""
# Base docstring
__lowerCamelCase = """google/mobilenet_v1_1.0_224"""
__lowerCamelCase = [1, 1_0_2_4, 7, 7]
# Image classification docstring
__lowerCamelCase = """google/mobilenet_v1_1.0_224"""
__lowerCamelCase = """tabby, tabby cat"""
__lowerCamelCase = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def UpperCAmelCase__ ( __snake_case , __snake_case , __snake_case=None ) -> str:
_A = {}
if isinstance(__snake_case , __snake_case ):
_A = model.mobilenet_va
else:
_A = model
_A = '''MobilenetV1/Conv2d_0/'''
_A = backbone.conv_stem.convolution.weight
_A = backbone.conv_stem.normalization.bias
_A = backbone.conv_stem.normalization.weight
_A = backbone.conv_stem.normalization.running_mean
_A = backbone.conv_stem.normalization.running_var
for i in range(13 ):
_A = i + 1
_A = i * 2
_A = backbone.layer[pt_index]
_A = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
_A = pointer.convolution.weight
_A = pointer.normalization.bias
_A = pointer.normalization.weight
_A = pointer.normalization.running_mean
_A = pointer.normalization.running_var
_A = backbone.layer[pt_index + 1]
_A = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
_A = pointer.convolution.weight
_A = pointer.normalization.bias
_A = pointer.normalization.weight
_A = pointer.normalization.running_mean
_A = pointer.normalization.running_var
if isinstance(__snake_case , __snake_case ):
_A = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
_A = model.classifier.weight
_A = model.classifier.bias
return tf_to_pt_map
def UpperCAmelCase__ ( __snake_case , __snake_case , __snake_case ) -> Optional[Any]:
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''' )
raise
# Load weights from TF model
_A = tf.train.list_variables(__snake_case )
_A = {}
for name, shape in init_vars:
logger.info(F'''Loading TF weight {name} with shape {shape}''' )
_A = tf.train.load_variable(__snake_case , __snake_case )
_A = array
# Build TF to PyTorch weights loading map
_A = _build_tf_to_pytorch_map(__snake_case , __snake_case , __snake_case )
for name, pointer in tf_to_pt_map.items():
logger.info(F'''Importing {name}''' )
if name not in tf_weights:
logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
continue
_A = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''' )
_A = np.transpose(__snake_case , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('''Transposing''' )
if len(pointer.shape ) == 2: # copying into linear layer
_A = array.squeeze().transpose()
else:
_A = np.transpose(__snake_case , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
_A = torch.from_numpy(__snake_case )
tf_weights.pop(__snake_case , __snake_case )
tf_weights.pop(name + '''/RMSProp''' , __snake_case )
tf_weights.pop(name + '''/RMSProp_1''' , __snake_case )
tf_weights.pop(name + '''/ExponentialMovingAverage''' , __snake_case )
logger.info(F'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' )
return model
def UpperCAmelCase__ ( __snake_case , __snake_case ) -> torch.Tensor:
_A , _A = features.shape[-2:]
_A , _A = conv_layer.stride
_A , _A = conv_layer.kernel_size
if in_height % stride_height == 0:
_A = max(kernel_height - stride_height , 0 )
else:
_A = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
_A = max(kernel_width - stride_width , 0 )
else:
_A = max(kernel_width - (in_width % stride_width) , 0 )
_A = pad_along_width // 2
_A = pad_along_width - pad_left
_A = pad_along_height // 2
_A = pad_along_height - pad_top
_A = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(__snake_case , __snake_case , '''constant''' , 0.0 )
class _snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , a , a , a , a , a = 1 , a = 1 , a = False , a = True , a = True , ) -> None:
"""simple docstring"""
super().__init__()
_A = config
if in_channels % groups != 0:
raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
_A = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
_A = nn.Convad(
in_channels=a , out_channels=a , kernel_size=a , stride=a , padding=a , groups=a , bias=a , padding_mode='''zeros''' , )
if use_normalization:
_A = nn.BatchNormad(
num_features=a , eps=config.layer_norm_eps , momentum=0.9_997 , affine=a , track_running_stats=a , )
else:
_A = None
if use_activation:
if isinstance(a , a ):
_A = ACTaFN[use_activation]
elif isinstance(config.hidden_act , a ):
_A = ACTaFN[config.hidden_act]
else:
_A = config.hidden_act
else:
_A = None
def lowercase_ ( self , a ) -> torch.Tensor:
"""simple docstring"""
if self.config.tf_padding:
_A = apply_tf_padding(a , self.convolution )
_A = self.convolution(a )
if self.normalization is not None:
_A = self.normalization(a )
if self.activation is not None:
_A = self.activation(a )
return features
class _snake_case ( lowerCamelCase ):
"""simple docstring"""
lowerCamelCase_ = MobileNetVaConfig
lowerCamelCase_ = load_tf_weights_in_mobilenet_va
lowerCamelCase_ = '''mobilenet_v1'''
lowerCamelCase_ = '''pixel_values'''
lowerCamelCase_ = False
def lowercase_ ( self , a ) -> None:
"""simple docstring"""
if isinstance(a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(a , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__lowerCamelCase = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__lowerCamelCase = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' ,lowerCamelCase ,)
class _snake_case ( lowerCamelCase ):
"""simple docstring"""
def __init__( self , a , a = True ) -> int:
"""simple docstring"""
super().__init__(a )
_A = config
_A = 3_2
_A = max(int(depth * config.depth_multiplier ) , config.min_depth )
_A = MobileNetVaConvLayer(
a , in_channels=config.num_channels , out_channels=a , kernel_size=3 , stride=2 , )
_A = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_A = nn.ModuleList()
for i in range(1_3 ):
_A = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_A = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
a , in_channels=a , out_channels=a , kernel_size=3 , stride=strides[i] , groups=a , ) )
self.layer.append(
MobileNetVaConvLayer(
a , in_channels=a , out_channels=a , kernel_size=1 , ) )
_A = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def lowercase_ ( self , a ) -> Dict:
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowercase_ ( self , a = None , a = None , a = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
"""simple docstring"""
_A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_A = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
_A = self.conv_stem(a )
_A = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_A = layer_module(a )
if output_hidden_states:
_A = all_hidden_states + (hidden_states,)
_A = hidden_states
if self.pooler is not None:
_A = torch.flatten(self.pooler(a ) , start_dim=1 )
else:
_A = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a , pooler_output=a , hidden_states=a , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' ,lowerCamelCase ,)
class _snake_case ( lowerCamelCase ):
"""simple docstring"""
def __init__( self , a ) -> None:
"""simple docstring"""
super().__init__(a )
_A = config.num_labels
_A = MobileNetVaModel(a )
_A = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_A = nn.Dropout(config.classifier_dropout_prob , inplace=a )
_A = nn.Linear(a , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowercase_ ( self , a = None , a = None , a = None , a = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
"""simple docstring"""
_A = return_dict if return_dict is not None else self.config.use_return_dict
_A = self.mobilenet_va(a , output_hidden_states=a , return_dict=a )
_A = outputs.pooler_output if return_dict else outputs[1]
_A = self.classifier(self.dropout(a ) )
_A = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_A = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_A = '''single_label_classification'''
else:
_A = '''multi_label_classification'''
if self.config.problem_type == "regression":
_A = MSELoss()
if self.num_labels == 1:
_A = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_A = loss_fct(a , a )
elif self.config.problem_type == "single_label_classification":
_A = CrossEntropyLoss()
_A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_A = BCEWithLogitsLoss()
_A = loss_fct(a , a )
if not return_dict:
_A = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=a , logits=a , hidden_states=outputs.hidden_states , )
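
# Standalone sketch of the TensorFlow-style "SAME" padding that the
# apply_tf_padding helper above computes: pad so that the output size equals
# ceil(input_size / stride) along each spatial dimension.
def _same_padding(in_size: int, kernel: int, stride: int) -> tuple:
    if in_size % stride == 0:
        pad = max(kernel - stride, 0)
    else:
        pad = max(kernel - (in_size % stride), 0)
    return pad // 2, pad - pad // 2  # (pad_before, pad_after)

# e.g. a 224-px input with a 3x3 kernel and stride 2 pads (0, 1), giving
# floor((224 + 1 - 3) / 2) + 1 = 112 = ceil(224 / 2) output positions.
assert _same_padding(224, 3, 2) == (0, 1)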
| 317
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _snake_case ( lowerCamelCase ,lowerCamelCase ):
"""simple docstring"""
lowerCamelCase_ = '''pixel_values'''
lowerCamelCase_ = False
lowerCamelCase_ = TimmBackboneConfig
def __init__( self , a , **a ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , '''timm''' )
super().__init__(a )
_A = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' )
if hasattr(a , '''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
_A = getattr(a , '''use_pretrained_backbone''' , a )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
_A = config.out_indices if getattr(a , '''out_indices''' , a ) is not None else (-1,)
_A = timm.create_model(
config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_A = self._backbone.return_layers
_A = {layer['''module''']: str(a ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(a )
@classmethod
def lowercase_ ( cls , a , *a , **a ) -> int:
"""simple docstring"""
requires_backends(cls , ['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
_A = kwargs.pop('''config''' , TimmBackboneConfig() )
_A = kwargs.pop('''use_timm_backbone''' , a )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
_A = kwargs.pop('''num_channels''' , config.num_channels )
_A = kwargs.pop('''features_only''' , config.features_only )
_A = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone )
_A = kwargs.pop('''out_indices''' , config.out_indices )
_A = TimmBackboneConfig(
backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
return super()._from_config(a , **a )
def lowercase_ ( self , a ) -> Optional[Any]:
"""simple docstring"""
pass
def lowercase_ ( self , a , a=None , a=None , a=None , **a ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
"""simple docstring"""
_A = return_dict if return_dict is not None else self.config.use_return_dict
_A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_A = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_A = self._all_layers
_A = self._backbone(a , **a )
_A = self._return_layers
_A = tuple(hidden_states[i] for i in self.out_indices )
else:
_A = self._backbone(a , **a )
_A = None
_A = tuple(a )
_A = tuple(a ) if hidden_states is not None else None
if not return_dict:
_A = (feature_maps,)
if output_hidden_states:
_A = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a )
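
# Hedged usage sketch, assuming `timm` is installed and this class is exported
# as `transformers.TimmBackbone`: wrap a timm ResNet (randomly initialized to
# avoid a weight download) and inspect the returned feature maps.
import torch
from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4), use_pretrained_backbone=False)
backbone = TimmBackbone(config)
outputs = backbone(torch.randn(1, 3, 224, 224))
for feature_map in outputs.feature_maps:
    print(feature_map.shape)  # strides 4, 8, 16, 32 -> 56, 28, 14, 7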
| 317
| 1
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__a : jnp.ndarray
__a : jnp.ndarray
class A ( nn.Module ):
"""simple docstring"""
__a : int
__a : Tuple[int] = (16, 32, 96, 256)
__a : jnp.dtype = jnp.floataa
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Optional[Any] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCamelCase_ : Optional[Any] = []
for i in range(len(self.block_out_channels ) - 1 ):
UpperCamelCase_ : Tuple = self.block_out_channels[i]
UpperCamelCase_ : List[Any] = self.block_out_channels[i + 1]
UpperCamelCase_ : Optional[int] = nn.Conv(
__lowerCAmelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__lowerCAmelCase )
UpperCamelCase_ : Optional[int] = nn.Conv(
__lowerCAmelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__lowerCAmelCase )
UpperCamelCase_ : str = blocks
UpperCamelCase_ : str = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __lowerCAmelCase ):
UpperCamelCase_ : Any = self.conv_in(__lowerCAmelCase )
UpperCamelCase_ : List[str] = nn.silu(__lowerCAmelCase )
for block in self.blocks:
UpperCamelCase_ : List[Any] = block(__lowerCAmelCase )
UpperCamelCase_ : str = nn.silu(__lowerCAmelCase )
UpperCamelCase_ : Dict = self.conv_out(__lowerCAmelCase )
return embedding
@flax_register_to_config
class A ( nn.Module, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__a : int = 32
__a : int = 4
__a : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__a : Union[bool, Tuple[bool]] = False
__a : Tuple[int] = (320, 640, 1280, 1280)
__a : int = 2
__a : Union[int, Tuple[int]] = 8
__a : Optional[Union[int, Tuple[int]]] = None
__a : int = 1280
__a : float = 0.0
__a : bool = False
__a : jnp.dtype = jnp.floataa
__a : bool = True
__a : int = 0
__a : str = "rgb"
__a : Tuple[int] = (16, 32, 96, 256)
def _UpperCAmelCase ( self , __lowerCAmelCase ):
# init input tensors
UpperCamelCase_ : Optional[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
UpperCamelCase_ : str = jnp.zeros(__lowerCAmelCase , dtype=jnp.floataa )
UpperCamelCase_ : Union[str, Any] = jnp.ones((1,) , dtype=jnp.intaa )
UpperCamelCase_ : Union[str, Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
UpperCamelCase_ : Optional[int] = (1, 3, self.sample_size * 8, self.sample_size * 8)
UpperCamelCase_ : int = jnp.zeros(__lowerCAmelCase , dtype=jnp.floataa )
UpperCamelCase_ , UpperCamelCase_ : Tuple = jax.random.split(__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )["params"]
def _UpperCAmelCase ( self ):
UpperCamelCase_ : int = self.block_out_channels
UpperCamelCase_ : List[Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
UpperCamelCase_ : Optional[Any] = self.num_attention_heads or self.attention_head_dim
# input
UpperCamelCase_ : List[str] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
UpperCamelCase_ : Union[str, Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
UpperCamelCase_ : str = FlaxTimestepEmbedding(__lowerCAmelCase , dtype=self.dtype )
UpperCamelCase_ : Optional[int] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
UpperCamelCase_ : Tuple = self.only_cross_attention
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase_ : List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase_ : Dict = (num_attention_heads,) * len(self.down_block_types )
# down
UpperCamelCase_ : str = []
UpperCamelCase_ : str = []
UpperCamelCase_ : Optional[Any] = block_out_channels[0]
UpperCamelCase_ : Optional[int] = nn.Conv(
__lowerCAmelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCAmelCase )
for i, down_block_type in enumerate(self.down_block_types ):
UpperCamelCase_ : Optional[int] = output_channel
UpperCamelCase_ : Optional[Any] = block_out_channels[i]
UpperCamelCase_ : Optional[Any] = i == len(__lowerCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
UpperCamelCase_ : Any = FlaxCrossAttnDownBlockaD(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
UpperCamelCase_ : Optional[int] = FlaxDownBlockaD(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__lowerCAmelCase )
for _ in range(self.layers_per_block ):
UpperCamelCase_ : List[Any] = nn.Conv(
__lowerCAmelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCAmelCase )
if not is_final_block:
UpperCamelCase_ : str = nn.Conv(
__lowerCAmelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCAmelCase )
UpperCamelCase_ : str = down_blocks
UpperCamelCase_ : Optional[int] = controlnet_down_blocks
# mid
UpperCamelCase_ : Any = block_out_channels[-1]
UpperCamelCase_ : Optional[int] = FlaxUNetMidBlockaDCrossAttn(
in_channels=__lowerCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
UpperCamelCase_ : Union[str, Any] = nn.Conv(
__lowerCAmelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1.0 , __lowerCAmelCase = True , __lowerCAmelCase = False , ):
UpperCamelCase_ : Dict = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
UpperCamelCase_ : List[str] = jnp.flip(__lowerCAmelCase , axis=1 )
# 1. time
if not isinstance(__lowerCAmelCase , jnp.ndarray ):
UpperCamelCase_ : List[Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
UpperCamelCase_ : Dict = timesteps.astype(dtype=jnp.floataa )
UpperCamelCase_ : List[Any] = jnp.expand_dims(__lowerCAmelCase , 0 )
UpperCamelCase_ : List[str] = self.time_proj(__lowerCAmelCase )
UpperCamelCase_ : str = self.time_embedding(__lowerCAmelCase )
# 2. pre-process
UpperCamelCase_ : Dict = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1) )
UpperCamelCase_ : List[str] = self.conv_in(__lowerCAmelCase )
UpperCamelCase_ : Optional[Any] = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1) )
UpperCamelCase_ : Union[str, Any] = self.controlnet_cond_embedding(__lowerCAmelCase )
sample += controlnet_cond
# 3. down
UpperCamelCase_ : Optional[Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase_ , UpperCamelCase_ : List[str] = down_block(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
else:
UpperCamelCase_ , UpperCamelCase_ : List[Any] = down_block(__lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
UpperCamelCase_ : int = self.mid_block(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
        # 5. controlnet blocks
UpperCamelCase_ : Dict = ()
for down_block_res_sample, controlnet_block in zip(__lowerCAmelCase , self.controlnet_down_blocks ):
UpperCamelCase_ : str = controlnet_block(__lowerCAmelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
UpperCamelCase_ : Dict = controlnet_down_block_res_samples
UpperCamelCase_ : Union[str, Any] = self.controlnet_mid_block(__lowerCAmelCase )
# 6. scaling
UpperCamelCase_ : Union[str, Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__lowerCAmelCase , mid_block_res_sample=__lowerCAmelCase )
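# Hedged note: mirroring diffusers' Flax ControlNet, the outputs above are one
# residual per down-block plus a single mid-block residual, each already scaled
# by `conditioning_scale`, ready to be added to the UNet's skip connections.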
'''simple docstring'''
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """simple docstring"""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes, add an edge from i to j
    # if the randomly generated number is lower than the probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number: int) -> dict:
    """simple docstring"""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
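    # Hedged usage sketch: the complete graph is deterministic, while the
    # random graph depends on the seed.
    assert complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}
    random.seed(1)
    print(random_graph(4, 0.5))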
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Any = "esm"
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1026 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase="absolute" , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Optional[int]:
super().__init__(pad_token_id=_lowerCAmelCase , mask_token_id=_lowerCAmelCase , **_lowerCAmelCase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = position_embedding_type
_lowerCAmelCase = use_cache
_lowerCAmelCase = emb_layer_norm_before
_lowerCAmelCase = token_dropout
_lowerCAmelCase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
_lowerCAmelCase = EsmFoldConfig()
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = EsmFoldConfig(**_lowerCAmelCase )
_lowerCAmelCase = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
_lowerCAmelCase = get_default_vocab_list()
else:
_lowerCAmelCase = vocab_list
else:
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , _lowerCAmelCase ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = super().to_dict()
if isinstance(self.esmfold_config , _lowerCAmelCase ):
_lowerCAmelCase = self.esmfold_config.to_dict()
return output
@dataclass
class lowerCAmelCase_ :
__lowerCamelCase : str = None
__lowerCamelCase : bool = True
__lowerCamelCase : bool = False
__lowerCamelCase : bool = False
__lowerCamelCase : bool = False
__lowerCamelCase : float = 0
__lowerCamelCase : bool = True
__lowerCamelCase : bool = False
__lowerCamelCase : int = 128
__lowerCamelCase : "TrunkConfig" = None
def _snake_case ( self ) -> Tuple:
if self.trunk is None:
_lowerCAmelCase = TrunkConfig()
elif isinstance(self.trunk , _lowerCAmelCase ):
_lowerCAmelCase = TrunkConfig(**self.trunk )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = asdict(self )
_lowerCAmelCase = self.trunk.to_dict()
return output
@dataclass
class lowerCAmelCase_ :
__lowerCamelCase : int = 48
__lowerCamelCase : int = 1_024
__lowerCamelCase : int = 128
__lowerCamelCase : int = 32
__lowerCamelCase : int = 32
__lowerCamelCase : int = 32
__lowerCamelCase : float = 0
__lowerCamelCase : float = 0
__lowerCamelCase : bool = False
__lowerCamelCase : int = 4
__lowerCamelCase : Optional[int] = 128
__lowerCamelCase : "StructureModuleConfig" = None
def _snake_case ( self ) -> List[Any]:
if self.structure_module is None:
_lowerCAmelCase = StructureModuleConfig()
elif isinstance(self.structure_module , _lowerCAmelCase ):
_lowerCAmelCase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
_lowerCAmelCase = self.sequence_state_dim // self.sequence_head_width
_lowerCAmelCase = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
        if self.dropout >= 0.4:
            raise ValueError(f'''`dropout` should be smaller than 0.4, got {self.dropout}.''' )
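        # e.g. assuming the upstream ESMFold defaults (sequence_state_dim=1024,
        # sequence_head_width=32, pairwise_state_dim=128, pairwise_head_width=32),
        # there are 1024 // 32 = 32 sequence heads and 128 // 32 = 4 pairwise
        # heads, so the checks above all pass.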
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = asdict(self )
_lowerCAmelCase = self.structure_module.to_dict()
return output
@dataclass
class lowerCAmelCase_ :
__lowerCamelCase : int = 384
__lowerCamelCase : int = 128
__lowerCamelCase : int = 16
__lowerCamelCase : int = 128
__lowerCamelCase : int = 12
__lowerCamelCase : int = 4
__lowerCamelCase : int = 8
__lowerCamelCase : float = 0.1
__lowerCamelCase : int = 8
__lowerCamelCase : int = 1
__lowerCamelCase : int = 2
__lowerCamelCase : int = 7
__lowerCamelCase : int = 10
__lowerCamelCase : float = 1e-8
__lowerCamelCase : float = 1e5
def _snake_case ( self ) -> Union[str, Any]:
return asdict(self )
def __a():
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : str = "gpt_bigcode"
__lowerCamelCase : Optional[int] = ["past_key_values"]
__lowerCamelCase : List[str] = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _lowerCAmelCase=50257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=50256 , _lowerCAmelCase=50256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> List[Any]:
_lowerCAmelCase = vocab_size
_lowerCAmelCase = n_positions
_lowerCAmelCase = n_embd
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = n_inner
_lowerCAmelCase = activation_function
_lowerCAmelCase = resid_pdrop
_lowerCAmelCase = embd_pdrop
_lowerCAmelCase = attn_pdrop
_lowerCAmelCase = layer_norm_epsilon
_lowerCAmelCase = initializer_range
_lowerCAmelCase = scale_attn_weights
_lowerCAmelCase = use_cache
_lowerCAmelCase = attention_softmax_in_fpaa
_lowerCAmelCase = scale_attention_softmax_in_fpaa
_lowerCAmelCase = multi_query
_lowerCAmelCase = bos_token_id
_lowerCAmelCase = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
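# Hedged usage sketch (upstream this class is GPTBigCodeConfig; the assertion
# follows from the attribute_map and defaults above):
# config = GPTBigCodeConfig()
# assert config.hidden_size == config.n_embd == 768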
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowerCamelCase ( __snake_case ):
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=None , **__UpperCamelCase ):
A_ = parent
A_ = config_class
A_ = has_text_modality
A_ = kwargs
A_ = common_properties
def lowercase_ ( self ):
A_ = self.config_class(**self.inputs_dict )
A_ = (
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) , msg=f'`{prop}` does not exist' )
        # Test that config has the common properties as setters
for idx, name in enumerate(__UpperCamelCase ):
try:
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.parent.assertEqual(
getattr(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , msg=f'`{name} value {idx} expected, but was {getattr(__UpperCamelCase , __UpperCamelCase )}' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(__UpperCamelCase ):
try:
A_ = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , msg=f'`{name} value {idx} expected, but was {getattr(__UpperCamelCase , __UpperCamelCase )}' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def lowercase_ ( self ):
A_ = self.config_class(**self.inputs_dict )
A_ = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , __UpperCamelCase )
def lowercase_ ( self ):
A_ = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = os.path.join(__UpperCamelCase , "config.json" )
config_first.to_json_file(__UpperCamelCase )
A_ = self.config_class.from_json_file(__UpperCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowercase_ ( self ):
A_ = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(__UpperCamelCase )
A_ = self.config_class.from_pretrained(__UpperCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowercase_ ( self ):
A_ = self.config_class(**self.inputs_dict )
A_ = "test"
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = os.path.join(__UpperCamelCase , __UpperCamelCase )
config_first.save_pretrained(__UpperCamelCase )
A_ = self.config_class.from_pretrained(__UpperCamelCase , subfolder=__UpperCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowercase_ ( self ):
A_ = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
A_ = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def lowercase_ ( self ):
if self.config_class.is_composition:
return
A_ = self.config_class()
self.parent.assertIsNotNone(__UpperCamelCase )
def lowercase_ ( self ):
A_ = copy.deepcopy(__UpperCamelCase )
A_ = self.config_class(**__UpperCamelCase )
A_ = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(__UpperCamelCase , __UpperCamelCase ) != value:
wrong_values.append((key, getattr(__UpperCamelCase , __UpperCamelCase ), value) )
if len(__UpperCamelCase ) > 0:
A_ = "\n".join([f'- {v[0]}: got {v[1]} instead of {v[2]}' for v in wrong_values] )
raise ValueError(f'The following keys were not properly set in the config:\n{errors}' )
def lowercase_ ( self ):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
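# Hedged usage sketch (mirrors how transformers test suites drive this helper;
# BertConfig here is only an example config class):
# self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
# self.config_tester.run_common_tests()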
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( __snake_case ):
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , ):
super().__init__()
self.register_modules(transformer=__UpperCamelCase , vae=__UpperCamelCase , scheduler=__UpperCamelCase )
        # create an imagenet -> id dictionary for easier use
A_ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
A_ = int(__UpperCamelCase )
A_ = dict(sorted(self.labels.items() ) )
def lowercase_ ( self , __UpperCamelCase ):
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
A_ = list(__UpperCamelCase )
for l in label:
if l not in self.labels:
raise ValueError(
f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , __UpperCamelCase , __UpperCamelCase = 4.0 , __UpperCamelCase = None , __UpperCamelCase = 50 , __UpperCamelCase = "pil" , __UpperCamelCase = True , ):
A_ = len(__UpperCamelCase )
A_ = self.transformer.config.sample_size
A_ = self.transformer.config.in_channels
A_ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__UpperCamelCase , device=self.device , dtype=self.transformer.dtype , )
A_ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ = torch.tensor(__UpperCamelCase , device=self.device ).reshape(-1 )
A_ = torch.tensor([1000] * batch_size , device=self.device )
A_ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ = latent_model_input[: len(__UpperCamelCase ) // 2]
A_ = torch.cat([half, half] , dim=0 )
A_ = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
A_ = t
if not torch.is_tensor(__UpperCamelCase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ = latent_model_input.device.type == "mps"
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A_ = torch.floataa if is_mps else torch.floataa
else:
A_ = torch.intaa if is_mps else torch.intaa
A_ = torch.tensor([timesteps] , dtype=__UpperCamelCase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ = self.transformer(
__UpperCamelCase , timestep=__UpperCamelCase , class_labels=__UpperCamelCase ).sample
# perform guidance
if guidance_scale > 1:
A_ , A_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_ , A_ = torch.split(__UpperCamelCase , len(__UpperCamelCase ) // 2 , dim=0 )
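                # classifier-free guidance: move the conditional prediction away
                # from the unconditional one by the guidance scale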
A_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ = torch.cat([half_eps, half_eps] , dim=0 )
A_ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_ , A_ = torch.split(__UpperCamelCase , __UpperCamelCase , dim=1 )
else:
A_ = noise_pred
# compute previous image: x_t -> x_t-1
A_ = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
if guidance_scale > 1:
A_ , A_ = latent_model_input.chunk(2 , dim=0 )
else:
A_ = latent_model_input
A_ = 1 / self.vae.config.scaling_factor * latents
A_ = self.vae.decode(__UpperCamelCase ).sample
A_ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=__UpperCamelCase )
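# Hedged usage sketch (upstream this is diffusers' DiTPipeline; the checkpoint
# name below is an assumption):
# pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
# class_ids = pipe.get_label_ids(["white shark"])
# image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]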
'''simple docstring'''
def solution(n: int = 10_00) -> int:
    a: int = 3
    result: int = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
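    # Hedged sanity check: below 10 the multiples of 3 or 5 are 3, 5, 6 and 9,
    # which sum to 23.
    assert solution(10) == 23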
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
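    # Hedged sanity check: for 4 days there are 43 valid prize strings
    # (13 with no absence plus 30 with exactly one absence).
    assert solution(4) == 43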
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__lowercase : Any = logging.get_logger(__name__)
class __lowercase :
    def __init__(self , text = None , conversation_id = None , past_user_inputs=None , generated_responses=None ):
if not conversation_id:
lowerCamelCase_ : Any = uuid.uuida()
if past_user_inputs is None:
lowerCamelCase_ : Optional[Any] = []
if generated_responses is None:
lowerCamelCase_ : Optional[Any] = []
lowerCamelCase_ : uuid.UUID = conversation_id
lowerCamelCase_ : List[str] = past_user_inputs
lowerCamelCase_ : List[str] = generated_responses
lowerCamelCase_ : Optional[str] = text
def __eq__(self , A ):
if not isinstance(A , A ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase__ (self , A , A = False ):
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""" )
lowerCamelCase_ : Optional[Any] = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
lowerCamelCase_ : Tuple = text
def UpperCAmelCase__ (self ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowerCamelCase_ : Tuple = None
def UpperCAmelCase__ (self , A ):
self.generated_responses.append(A )
def UpperCAmelCase__ (self ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__(self ):
lowerCamelCase_ : str = F"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
lowerCamelCase_ : str = '''user''' if is_user else '''bot'''
output += F"""{name} >> {text} \n"""
return output
@add_end_docstrings(
_lowercase , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class __lowercase ( _lowercase ):
def __init__(self , *A , **A ):
super().__init__(*A , **A )
if self.tokenizer.pad_token_id is None:
lowerCamelCase_ : Optional[Any] = self.tokenizer.eos_token
    def UpperCAmelCase__ (self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ):
lowerCamelCase_ : Dict = {}
lowerCamelCase_ : int = {}
lowerCamelCase_ : Any = {}
if min_length_for_response is not None:
lowerCamelCase_ : Any = min_length_for_response
if minimum_tokens is not None:
lowerCamelCase_ : Optional[Any] = minimum_tokens
if "max_length" in generate_kwargs:
lowerCamelCase_ : Any = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowerCamelCase_ : str = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(A )
return preprocess_params, forward_params, postprocess_params
    def __call__(self , conversations , num_workers=0 , **kwargs ):
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
return outputs
    def UpperCAmelCase__ (self , conversation , min_length_for_response=3_2 ):
if not isinstance(A , A ):
raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' )
if conversation.new_user_input is None:
raise ValueError(
F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
'''Add user inputs with the conversation\'s `add_user_input` method''' )
if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
lowerCamelCase_ : int = self.tokenizer._build_conversation_input_ids(A )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowerCamelCase_ : int = self._legacy_parse_and_tokenize(A )
if self.framework == "pt":
lowerCamelCase_ : Union[str, Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowerCamelCase_ : Any = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
    def UpperCAmelCase__ (self , model_inputs , minimum_tokens=1_0 , **generate_kwargs ):
lowerCamelCase_ : Dict = generate_kwargs.get('''max_length''' , self.model.config.max_length )
lowerCamelCase_ : int = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
lowerCamelCase_ : Optional[Any] = max_length - minimum_tokens
lowerCamelCase_ : Dict = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
lowerCamelCase_ : Union[str, Any] = model_inputs['''attention_mask'''][:, -trim:]
lowerCamelCase_ : Union[str, Any] = model_inputs.pop('''conversation''' )
lowerCamelCase_ : Any = max_length
lowerCamelCase_ : Optional[Any] = self.model.generate(**A , **A )
if self.model.config.is_encoder_decoder:
lowerCamelCase_ : Tuple = 1
else:
lowerCamelCase_ : Any = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase__ (self , A , A=True ):
lowerCamelCase_ : int = model_outputs['''output_ids''']
lowerCamelCase_ : Optional[int] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
lowerCamelCase_ : str = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(A )
return conversation
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Optional[int] = self.tokenizer.eos_token_id
lowerCamelCase_ : Tuple = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(A , add_special_tokens=A ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(A , add_special_tokens=A ) )
if len(A ) > self.tokenizer.model_max_length:
lowerCamelCase_ : Optional[int] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
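# Hedged usage sketch (this mirrors the standard transformers conversational
# pipeline API that this file ports):
# from transformers import pipeline, Conversation
# chatbot = pipeline("conversational")
# conversation = Conversation("What's a good movie to watch tonight?")
# conversation = chatbot(conversation)
# print(conversation.generated_responses[-1])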
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowercase : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__lowercase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class A:
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0.2 , SCREAMING_SNAKE_CASE__=0.2 ) -> int:
"""simple docstring"""
_UpperCamelCase :Optional[int] = bp_numa
_UpperCamelCase :Optional[int] = bp_numa
_UpperCamelCase :Tuple = bp_numa
_UpperCamelCase :str = conva_get[:2]
_UpperCamelCase :Any = conva_get[2]
_UpperCamelCase :Optional[int] = size_pa
_UpperCamelCase :Any = rate_w
_UpperCamelCase :int = rate_t
_UpperCamelCase :Dict = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
_UpperCamelCase :int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
_UpperCamelCase :List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
_UpperCamelCase :List[str] = -2 * np.random.rand(self.conva[1] ) + 1
_UpperCamelCase :str = -2 * np.random.rand(self.num_bpa ) + 1
_UpperCamelCase :Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
_UpperCamelCase :Any = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as f:
pickle.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(f"Model saved: {save_path}" )
@classmethod
def _UpperCamelCase( cls , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ , '''rb''' ) as f:
_UpperCamelCase :int = pickle.load(SCREAMING_SNAKE_CASE__ ) # noqa: S301
_UpperCamelCase :Dict = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
_UpperCamelCase :Dict = model_dic.get('''size_pooling1''' )
_UpperCamelCase :int = model_dic.get('''num_bp1''' )
_UpperCamelCase :Optional[Any] = model_dic.get('''num_bp2''' )
_UpperCamelCase :int = model_dic.get('''num_bp3''' )
_UpperCamelCase :Union[str, Any] = model_dic.get('''rate_weight''' )
_UpperCamelCase :Union[str, Any] = model_dic.get('''rate_thre''' )
# create model instance
_UpperCamelCase :int = CNN(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# modify model parameter
_UpperCamelCase :Optional[int] = model_dic.get('''w_conv1''' )
_UpperCamelCase :Union[str, Any] = model_dic.get('''wkj''' )
_UpperCamelCase :int = model_dic.get('''vji''' )
_UpperCamelCase :Union[str, Any] = model_dic.get('''thre_conv1''' )
_UpperCamelCase :List[Any] = model_dic.get('''thre_bp2''' )
_UpperCamelCase :Optional[Any] = model_dic.get('''thre_bp3''' )
return conv_ins
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x ))
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
return round(SCREAMING_SNAKE_CASE__ , 3 )
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase :Tuple = convs[0]
_UpperCamelCase :Tuple = convs[1]
_UpperCamelCase :str = np.shape(SCREAMING_SNAKE_CASE__ )[0]
# get the data slice of original image data, data_focus
_UpperCamelCase :List[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , SCREAMING_SNAKE_CASE__ ):
for j_focus in range(0 , size_data - size_conv + 1 , SCREAMING_SNAKE_CASE__ ):
_UpperCamelCase :Any = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(SCREAMING_SNAKE_CASE__ )
        # calculate the feature map for every kernel, and save them as a list of matrices
_UpperCamelCase :Optional[int] = []
_UpperCamelCase :List[str] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(SCREAMING_SNAKE_CASE__ ):
_UpperCamelCase :Union[str, Any] = []
for i_focus in range(len(SCREAMING_SNAKE_CASE__ ) ):
_UpperCamelCase :List[str] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(SCREAMING_SNAKE_CASE__ ) )
_UpperCamelCase :Union[str, Any] = np.asmatrix(SCREAMING_SNAKE_CASE__ ).reshape(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
data_featuremap.append(SCREAMING_SNAKE_CASE__ )
        # expand the data slices to one dimension
_UpperCamelCase :Union[str, Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(SCREAMING_SNAKE_CASE__ ) )
_UpperCamelCase :str = np.asarray(SCREAMING_SNAKE_CASE__ )
return focus_list, data_featuremap
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="average_pool" ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :List[str] = len(featuremaps[0] )
_UpperCamelCase :Optional[int] = int(size_map / size_pooling )
_UpperCamelCase :List[Any] = []
for i_map in range(len(SCREAMING_SNAKE_CASE__ ) ):
_UpperCamelCase :Optional[int] = featuremaps[i_map]
_UpperCamelCase :Union[str, Any] = []
for i_focus in range(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for j_focus in range(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_UpperCamelCase :Any = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(SCREAMING_SNAKE_CASE__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(SCREAMING_SNAKE_CASE__ ) )
_UpperCamelCase :List[Any] = np.asmatrix(SCREAMING_SNAKE_CASE__ ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
featuremap_pooled.append(SCREAMING_SNAKE_CASE__ )
return featuremap_pooled
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
_UpperCamelCase :Optional[Any] = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
_UpperCamelCase :List[str] = np.shape(data[i] )
_UpperCamelCase :List[str] = data[i].reshape(1 , shapes[0] * shapes[1] )
_UpperCamelCase :List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Tuple = np.asarray(SCREAMING_SNAKE_CASE__ )
return data_expanded
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Optional[Any] = np.shape(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Dict = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
_UpperCamelCase :Tuple = []
_UpperCamelCase :List[Any] = 0
for i_map in range(SCREAMING_SNAKE_CASE__ ):
_UpperCamelCase :Tuple = np.ones((size_map, size_map) )
for i in range(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for j in range(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_UpperCamelCase :List[Any] = pd_pool[
i_pool
]
_UpperCamelCase :Tuple = i_pool + 1
_UpperCamelCase :str = np.multiply(
SCREAMING_SNAKE_CASE__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(SCREAMING_SNAKE_CASE__ )
return pd_all
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=bool ) -> Optional[Any]:
"""simple docstring"""
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(SCREAMING_SNAKE_CASE__ )) )
print((''' - - Shape: Teach_Data ''', np.shape(SCREAMING_SNAKE_CASE__ )) )
_UpperCamelCase :List[str] = 0
_UpperCamelCase :Dict = []
_UpperCamelCase :Tuple = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
_UpperCamelCase :str = 0
print(f"-------------Learning Time {rp}--------------" )
for p in range(len(SCREAMING_SNAKE_CASE__ ) ):
# print('------------Learning Image: %d--------------'%p)
_UpperCamelCase :Any = np.asmatrix(datas_train[p] )
_UpperCamelCase :List[str] = np.asarray(datas_teach[p] )
_UpperCamelCase , _UpperCamelCase :Union[str, Any] = self.convolute(
SCREAMING_SNAKE_CASE__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_UpperCamelCase :Union[str, Any] = self.pooling(SCREAMING_SNAKE_CASE__ , self.size_poolinga )
_UpperCamelCase :int = np.shape(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Dict = self._expand(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Union[str, Any] = data_bp_input
_UpperCamelCase :int = np.dot(SCREAMING_SNAKE_CASE__ , self.vji.T ) - self.thre_bpa
_UpperCamelCase :Tuple = self.sig(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :List[str] = np.dot(SCREAMING_SNAKE_CASE__ , self.wkj.T ) - self.thre_bpa
_UpperCamelCase :List[Any] = self.sig(SCREAMING_SNAKE_CASE__ )
                # --------------Model Learning------------------------
# calculate error and gradient---------------
_UpperCamelCase :Dict = np.multiply(
(data_teach - bp_outa) , np.multiply(SCREAMING_SNAKE_CASE__ , (1 - bp_outa) ) )
_UpperCamelCase :str = np.multiply(
np.dot(SCREAMING_SNAKE_CASE__ , self.wkj ) , np.multiply(SCREAMING_SNAKE_CASE__ , (1 - bp_outa) ) )
_UpperCamelCase :List[Any] = np.dot(SCREAMING_SNAKE_CASE__ , self.vji )
_UpperCamelCase :str = pd_i_all / (self.size_poolinga * self.size_poolinga)
_UpperCamelCase :Tuple = pd_conva_pooled.T.getA().tolist()
_UpperCamelCase :Tuple = self._calculate_gradient_from_pool(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
_UpperCamelCase :Optional[int] = self._expand_mat(pd_conva_all[k_conv] )
_UpperCamelCase :int = self.rate_weight * np.dot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :str = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
_UpperCamelCase :Dict = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layers
_UpperCamelCase :List[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_UpperCamelCase :Optional[int] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_UpperCamelCase :Optional[int] = self.thre_bpa - pd_k_all * self.rate_thre
_UpperCamelCase :Tuple = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error for this single image
_UpperCamelCase :Tuple = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_UpperCamelCase :Optional[int] = rp + 1
_UpperCamelCase :Tuple = error_count / patterns
all_mse.append(SCREAMING_SNAKE_CASE__ )
def draw_error():
_UpperCamelCase :Dict = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(SCREAMING_SNAKE_CASE__ , '''+-''' )
plt.plot(SCREAMING_SNAKE_CASE__ , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(SCREAMING_SNAKE_CASE__ , alpha=0.5 )
plt.show()
        print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, f" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase :Dict = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(SCREAMING_SNAKE_CASE__ )) )
for p in range(len(SCREAMING_SNAKE_CASE__ ) ):
_UpperCamelCase :int = np.asmatrix(datas_test[p] )
_UpperCamelCase , _UpperCamelCase :int = self.convolute(
SCREAMING_SNAKE_CASE__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_UpperCamelCase :Union[str, Any] = self.pooling(SCREAMING_SNAKE_CASE__ , self.size_poolinga )
_UpperCamelCase :int = self._expand(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :List[Any] = data_bp_input
_UpperCamelCase :int = bp_outa * self.vji.T - self.thre_bpa
_UpperCamelCase :str = self.sig(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :str = bp_outa * self.wkj.T - self.thre_bpa
_UpperCamelCase :int = self.sig(SCREAMING_SNAKE_CASE__ )
produce_out.extend(bp_outa.getA().tolist() )
_UpperCamelCase :Union[str, Any] = [list(map(self.do_round , SCREAMING_SNAKE_CASE__ ) ) for each in produce_out]
return np.asarray(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :Any = np.asmatrix(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase , _UpperCamelCase :Tuple = self.convolute(
SCREAMING_SNAKE_CASE__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_UpperCamelCase :Optional[Any] = self.pooling(SCREAMING_SNAKE_CASE__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase__ :Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ :int = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
for attribute in key.split('''.''' ):
_UpperCamelCase :Any = getattr(snake_case__ , snake_case__ )
if weight_type is not None:
_UpperCamelCase :Any = getattr(snake_case__ , snake_case__ ).shape
else:
_UpperCamelCase :Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
_UpperCamelCase :str = value
elif weight_type == "weight_g":
_UpperCamelCase :Dict = value
elif weight_type == "weight_v":
_UpperCamelCase :Optional[Any] = value
elif weight_type == "bias":
_UpperCamelCase :str = value
else:
_UpperCamelCase :int = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ):
_UpperCamelCase :Optional[int] = []
_UpperCamelCase :List[str] = fairseq_model.state_dict()
_UpperCamelCase :str = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_UpperCamelCase :Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == '''group''' , )
_UpperCamelCase :Dict = True
else:
for key, mapped_key in MAPPING.items():
_UpperCamelCase :Optional[int] = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_UpperCamelCase :List[str] = True
if "*" in mapped_key:
_UpperCamelCase :List[str] = name.split(snake_case__ )[0].split('''.''' )[-2]
_UpperCamelCase :Tuple = mapped_key.replace('''*''' , snake_case__ )
if "weight_g" in name:
_UpperCamelCase :List[Any] = '''weight_g'''
elif "weight_v" in name:
_UpperCamelCase :Union[str, Any] = '''weight_v'''
elif "weight" in name:
_UpperCamelCase :List[Any] = '''weight'''
elif "bias" in name:
_UpperCamelCase :List[Any] = '''bias'''
else:
_UpperCamelCase :List[Any] = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
_UpperCamelCase :Optional[int] = full_name.split('''conv_layers.''' )[-1]
_UpperCamelCase :Optional[int] = name.split('''.''' )
_UpperCamelCase :Optional[Any] = int(items[0] )
_UpperCamelCase :List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
_UpperCamelCase :Optional[int] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
_UpperCamelCase :Optional[Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
_UpperCamelCase :int = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
_UpperCamelCase :Dict = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(snake_case__ )
def convert_config( model , is_finetuned ):
_UpperCamelCase :str = SEWConfig()
if is_finetuned:
_UpperCamelCase :Optional[int] = model.wav_encoder.wav_model.cfg
else:
_UpperCamelCase :Dict = model.cfg
_UpperCamelCase :Dict = fs_config.conv_bias
_UpperCamelCase :int = eval(fs_config.conv_feature_layers )
_UpperCamelCase :List[Any] = [x[0] for x in conv_layers]
_UpperCamelCase :Optional[int] = [x[1] for x in conv_layers]
_UpperCamelCase :Optional[int] = [x[2] for x in conv_layers]
_UpperCamelCase :str = '''gelu'''
_UpperCamelCase :Optional[int] = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
_UpperCamelCase :List[Any] = 0.0
_UpperCamelCase :Optional[int] = fs_config.activation_fn.name
_UpperCamelCase :str = fs_config.encoder_embed_dim
_UpperCamelCase :Dict = 0.02
_UpperCamelCase :Optional[int] = fs_config.encoder_ffn_embed_dim
_UpperCamelCase :str = 1E-5
_UpperCamelCase :int = fs_config.encoder_layerdrop
_UpperCamelCase :Union[str, Any] = fs_config.encoder_attention_heads
_UpperCamelCase :List[str] = fs_config.conv_pos_groups
_UpperCamelCase :List[Any] = fs_config.conv_pos
_UpperCamelCase :List[str] = len(snake_case__ )
_UpperCamelCase :Optional[int] = fs_config.encoder_layers
_UpperCamelCase :Optional[int] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_UpperCamelCase :List[Any] = model.cfg
_UpperCamelCase :List[Any] = fs_config.final_dropout
_UpperCamelCase :Dict = fs_config.layerdrop
_UpperCamelCase :Any = fs_config.activation_dropout
_UpperCamelCase :List[str] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_UpperCamelCase :Optional[Any] = fs_config.attention_dropout
_UpperCamelCase :List[Any] = fs_config.dropout_input
_UpperCamelCase :Dict = fs_config.dropout
_UpperCamelCase :int = fs_config.mask_channel_length
_UpperCamelCase :Tuple = fs_config.mask_channel_prob
_UpperCamelCase :int = fs_config.mask_length
_UpperCamelCase :Dict = fs_config.mask_prob
_UpperCamelCase :List[Any] = '''Wav2Vec2FeatureExtractor'''
_UpperCamelCase :Optional[Any] = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def convert_sew_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
if is_finetuned:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase :List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase :str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_UpperCamelCase :Any = SEWConfig.from_pretrained(snake_case__ )
else:
_UpperCamelCase :List[str] = convert_config(model[0] , snake_case__ )
_UpperCamelCase :List[str] = model[0].eval()
_UpperCamelCase :Optional[Any] = True if config.feat_extract_norm == '''layer''' else False
_UpperCamelCase :str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
if is_finetuned:
if dict_path:
_UpperCamelCase :int = Dictionary.load(snake_case__ )
            # important: change bos & pad token ids since the CTC symbol
            # is <pad> and not <s> as in fairseq
_UpperCamelCase :List[Any] = target_dict.pad_index
_UpperCamelCase :str = target_dict.bos_index
_UpperCamelCase :int = target_dict.pad_index
_UpperCamelCase :Dict = target_dict.bos_index
_UpperCamelCase :int = target_dict.eos_index
_UpperCamelCase :str = len(target_dict.symbols )
_UpperCamelCase :Optional[int] = os.path.join(snake_case__ , '''vocab.json''' )
if not os.path.isdir(snake_case__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(snake_case__ ) )
return
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , snake_case__ )
_UpperCamelCase :int = WavaVecaCTCTokenizer(
snake_case__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=snake_case__ , )
_UpperCamelCase :Dict = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
processor.save_pretrained(snake_case__ )
_UpperCamelCase :Tuple = SEWForCTC(snake_case__ )
else:
_UpperCamelCase :str = SEWModel(snake_case__ )
feature_extractor.save_pretrained(snake_case__ )
recursively_load_weights(snake_case__ , snake_case__ , snake_case__ )
hf_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCamelCase__ :List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCamelCase__ :Tuple = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
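# Hedged invocation sketch (flags taken from the argument parser above; all
# paths are placeholders):
# python convert_sew_checkpoint.py \
#     --checkpoint_path /path/to/sew_checkpoint.pt \
#     --pytorch_dump_folder_path ./sew-hf \
#     --is_finetuned \
#     --dict_path /path/to/fairseq/dict.ltr.txt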
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
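    # Hedged sanity check: 1 January 2000 fell on a Saturday.
    assert get_week_day(2000, 1, 1) == "Saturday"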
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
A_ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
A_ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}")
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}")
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively")
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}")
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})
    return output_scores
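# Illustrative note (added): the CoNLL score is the plain average of the MUC,
# B-cubed and CEAFe F1 values. For example, F1 values of 0.8, 0.7 and 0.9
# accumulate to conll = 2.4, and the reported score is (2.4 / 3) * 100 = 80.0.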
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )
        return score
| 465
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# NOTE: each of the placeholder classes below stands in for a distinct pipeline
# class in the upstream dummy-objects module; the original class names are
# elided in this dump, so the generic name is kept.
class A_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class A_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class A_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class A_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class A_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class A_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
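# Illustrative behavior (added; assumes the standard DummyObject/requires_backends
# contract): instantiating any of these placeholders, or calling from_config /
# from_pretrained on them, without the listed backends installed raises an
# ImportError that names torch, transformers and onnx as missing dependencies.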
| 46
|
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
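        # Worked example (added): with the defaults above, num_patches = (30 // 2) ** 2 = 225
        # and seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 visible tokens.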
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make the masking reproducible so PT and TF see identical noise
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 46
| 1
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
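# Illustrative note (added): MAPPING rewrites fairseq parameter names into the
# Hugging Face layout; the "*" placeholder is replaced with the layer index taken
# from the fairseq name, e.g. "encoder.layers.3.fc1.weight" is mapped to
# "encoder.layers.3.feed_forward.intermediate_dense.weight".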
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk the dotted `key` inside `hf_pointer` and copy `value` into place."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy all encoder weights from the fairseq model into the HF wav2vec2 model."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group")
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor weight into the HF model."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build an (untied) nn.Linear layer sharing the weights of an embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """Build a token-to-id vocabulary from a fairseq `dict.txt` file."""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
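# Example (added): a fairseq dict file whose lines are "<word> <count>" maps to
# ids after the four specials, e.g. the lines "hello 52" and "world 41" yield
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}.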
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers):
    """Copy/paste/tweak the fairseq model's weights to the transformers design."""
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 717
|
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def _a ( self ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _a ( self ) -> int:
pass
def _a ( self ) -> Any:
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ ,nn.Linear ) )
def _a ( self ) -> Tuple:
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCAmelCase_ )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,UpperCAmelCase_ )
def _a ( self ) -> Tuple:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def _a ( self ) -> List[Any]:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def _a ( self ) -> Any:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
def _a ( self ) -> str:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase_ )
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 539
| 0
|
'''simple docstring'''
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}
def decimal_to_hexadecimal(decimal) -> str:
    """Convert a whole-valued decimal number to its hexadecimal string form."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
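# Examples (added): decimal_to_hexadecimal(37) -> '0x25',
# decimal_to_hexadecimal(-256) -> '-0x100'. Note the edge case that 0 yields
# the bare prefix '0x', since the while-loop never runs for zero input.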
if __name__ == "__main__":
import doctest
doctest.testmod()
| 90
|
'''simple docstring'''
class FlowNetwork:
    """A capacity graph with a single (possibly synthesized) source and sink."""

    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        # accept a single vertex as well as a list of vertices
        # (the original tested `sources is int`, which is always False)
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    """Base class for algorithms that run once over a flow network."""

    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case_ ):
'''simple docstring'''
super().__init__(snake_case_ )
__UpperCAmelCase: Optional[int] = [[0] * self.verticies_count for i in range(self.verticies_count )]
__UpperCAmelCase: Union[str, Any] = [0] * self.verticies_count
__UpperCAmelCase: Tuple = [0] * self.verticies_count
    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)
    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
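    # Example: with an excess of 5 at from_index and a residual capacity of 3 on
    # the arc, preflow_delta = min(5, 3) = 3 units are pushed forward.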
    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
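    # Relabel lifts the vertex to one unit above its lowest admissible neighbour,
    # which guarantees that at least one push becomes possible on the next pass.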
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
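    # The only augmenting path in this network is 0 -> 1 -> 2 -> 3 with
    # capacities 7, 6 and 8, so the printed value should be the bottleneck, 6.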
| 523
| 0
|
'''simple docstring'''
def upper(word: str) -> str:
    '''
    Convert lowercase ASCII letters in a string to uppercase.

    >>> upper("wow")
    'WOW'
    >>> upper("Hello World")
    'HELLO WORLD'
    '''
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 719
|
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    '''Hide the terminal cursor on Windows or POSIX terminals.'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor():
    '''Make the terminal cursor visible again.'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hidden_cursor():  # name reconstructed: the original context-manager name was lost
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
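# Usage sketch (hypothetical caller): the context manager restores the cursor
# even if the wrapped block raises.
#
#     with hidden_cursor():
#         render_progress()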
| 377
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva  # OpenCV; aliased to match the identifiers used below
# Parameters: fill in the dataset locations before running.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    '''Flip every image in IMAGE_DIR together with its YOLO annotation file.'''
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cva.imwrite(f"/{file_root}.jpg", image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    '''Collect image paths and their YOLO-format bounding boxes.'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    '''Flip each image and mirror its box centres along the chosen axis.'''
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
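# YOLO boxes are (class, x_center, y_center, width, height) normalised to [0, 1],
# so a horizontal flip maps x_center = 0.25 to 1 - 0.25 = 0.75 while the width
# and height stay unchanged.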
def random_chars(number_char: int = 32) -> str:
    '''Generate a random lowercase-alphanumeric string of the given length.'''
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 79
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=30 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=32 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=10 , _lowerCAmelCase=0.0_2 , _lowerCAmelCase=3 , _lowerCAmelCase=None , ):
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : Union[str, Any] = image_size
UpperCAmelCase__ : int = patch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : int = is_training
UpperCAmelCase__ : List[str] = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Tuple = num_attention_heads
UpperCAmelCase__ : Optional[int] = intermediate_size
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : int = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = type_sequence_label_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : Any = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ : Any = (image_size // patch_size) ** 2
UpperCAmelCase__ : Tuple = num_patches + 1
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = None
if self.use_labels:
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase__ : str = TFViTModel(config=_lowerCAmelCase )
UpperCAmelCase__ : str = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in the config.
UpperCAmelCase__ : Optional[Any] = self.image_size // 2
UpperCAmelCase__ : List[str] = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase__ : List[Any] = model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
UpperCAmelCase__ : str = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase__ : Tuple = self.type_sequence_label_size
UpperCAmelCase__ : List[Any] = TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase__ : List[str] = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in the config.
UpperCAmelCase__ : Tuple = self.image_size // 2
UpperCAmelCase__ : Union[str, Any] = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase__ : List[str] = model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : Optional[Any] = TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCAmelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__lowerCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
__lowerCamelCase = (
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Any = TFViTModelTester(self )
UpperCAmelCase__ : int = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : str = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , tf.keras.layers.Layer ) )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = model_class(_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[Any] = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(_lowerCAmelCase )
def _lowerCamelCase ( ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Optional[int] = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
UpperCAmelCase__ : List[Any] = self.default_image_processor
UpperCAmelCase__ : Union[str, Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
UpperCAmelCase__ : int = model(**_lowerCAmelCase )
# verify the logits
UpperCAmelCase__ : Tuple = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
UpperCAmelCase__ : int = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 )
| 79
| 1
|
def print_pascal_triangle(num_rows: int) -> None:
    '''Print Pascal's triangle for the given number of rows.'''
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    '''Build the triangle row by row, reusing previously computed rows.'''
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
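# Example: from row [1, 2, 1] the two interior elements of the next row are
# 1 + 2 = 3 and 2 + 1 = 3, giving [1, 3, 3, 1] once the fixed 1s are in place.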
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    '''Build the triangle computing only the distinct half of each row.'''
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
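# Sanity-check sketch (illustrative, not part of the original module):
#     generate_pascal_triangle(4) == generate_pascal_triangle_optimized(4)
#     == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]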
def benchmark() -> None:
    '''Time both generators against each other for small inputs.'''
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")
    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 716
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=2 , __lowercase=8 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=99 , __lowercase=16 , __lowercase=5 , __lowercase=2 , __lowercase=36 , __lowercase="gelu" , __lowercase=0.0 , __lowercase=0.0 , __lowercase=512 , __lowercase=16 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> List[str]:
__UpperCamelCase :Union[str, Any] = parent
__UpperCamelCase :str = batch_size
__UpperCamelCase :Union[str, Any] = seq_length
__UpperCamelCase :Optional[Any] = is_training
__UpperCamelCase :Union[str, Any] = use_input_mask
__UpperCamelCase :Any = use_token_type_ids
__UpperCamelCase :List[str] = use_labels
__UpperCamelCase :Tuple = vocab_size
__UpperCamelCase :Tuple = hidden_size
__UpperCamelCase :Optional[Any] = num_hidden_layers
__UpperCamelCase :Tuple = num_attention_heads
__UpperCamelCase :Any = intermediate_size
__UpperCamelCase :Optional[Any] = hidden_act
__UpperCamelCase :Any = hidden_dropout_prob
__UpperCamelCase :str = attention_probs_dropout_prob
__UpperCamelCase :Optional[Any] = max_position_embeddings
__UpperCamelCase :int = type_vocab_size
__UpperCamelCase :Optional[int] = type_sequence_label_size
__UpperCamelCase :Any = initializer_range
__UpperCamelCase :List[str] = num_labels
__UpperCamelCase :Dict = num_choices
__UpperCamelCase :Union[str, Any] = scope
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCamelCase :List[Any] = None
if self.use_input_mask:
__UpperCamelCase :Any = random_attention_mask([self.batch_size, self.seq_length])
__UpperCamelCase :Union[str, Any] = None
if self.use_token_type_ids:
__UpperCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCamelCase :List[str] = None
__UpperCamelCase :Tuple = None
__UpperCamelCase :Union[str, Any] = None
if self.use_labels:
__UpperCamelCase :Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCamelCase :List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCamelCase :List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self) -> Union[str, Any]:
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Dict = self.get_config()
__UpperCamelCase :List[str] = 300
return config
def UpperCamelCase__ ( self) -> int:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
__UpperCamelCase :Union[str, Any] = True
__UpperCamelCase :List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
__UpperCamelCase :List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> int:
__UpperCamelCase :int = MraModel(config=__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :str = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase)
__UpperCamelCase :Tuple = model(__lowercase , token_type_ids=__lowercase)
__UpperCamelCase :str = model(__lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> List[Any]:
__UpperCamelCase :Tuple = True
__UpperCamelCase :Dict = MraModel(__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :int = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
__UpperCamelCase :Any = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , encoder_hidden_states=__lowercase , )
__UpperCamelCase :str = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Union[str, Any]:
__UpperCamelCase :Dict = MraForMaskedLM(config=__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Optional[Any] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> int:
__UpperCamelCase :Tuple = MraForQuestionAnswering(config=__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Any = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> List[str]:
__UpperCamelCase :List[Any] = self.num_labels
__UpperCamelCase :str = MraForSequenceClassification(__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Dict = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Optional[int]:
__UpperCamelCase :str = self.num_labels
__UpperCamelCase :Optional[int] = MraForTokenClassification(config=__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Optional[Any] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Optional[Any]:
__UpperCamelCase :Optional[Any] = self.num_choices
__UpperCamelCase :Dict = MraForMultipleChoice(config=__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Tuple = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :int = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :Union[str, Any] = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCamelCase__ ( self) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__UpperCamelCase :List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : List[str] = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
a__ : Optional[int] = False
a__ : Optional[int] = False
a__ : str = False
a__ : Optional[Any] = False
a__ : Union[str, Any] = ()
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :str = MraModelTester(self)
__UpperCamelCase :Tuple = ConfigTester(self , config_class=__lowercase , hidden_size=37)
def UpperCamelCase__ ( self) -> Dict:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCamelCase :Optional[int] = type
self.model_tester.create_and_check_model(*__lowercase)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowercase)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowercase)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowercase)
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowercase)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowercase)
@slow
def UpperCamelCase__ ( self) -> Dict:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase :List[Any] = MraModel.from_pretrained(__lowercase)
self.assertIsNotNone(__lowercase)
@unittest.skip(reason='''MRA does not output attentions''')
def UpperCamelCase__ ( self) -> Any:
return
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :int = MraModel.from_pretrained('''uw-madison/mra-base-512-4''')
__UpperCamelCase :Union[str, Any] = torch.arange(256).unsqueeze(0)
with torch.no_grad():
__UpperCamelCase :List[Any] = model(__lowercase)[0]
__UpperCamelCase :Dict = torch.Size((1, 256, 768))
self.assertEqual(output.shape , __lowercase)
__UpperCamelCase :Optional[Any] = torch.tensor(
[[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1E-4))
@slow
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :str = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''')
__UpperCamelCase :str = torch.arange(256).unsqueeze(0)
with torch.no_grad():
__UpperCamelCase :Optional[Any] = model(__lowercase)[0]
__UpperCamelCase :List[str] = 50_265
__UpperCamelCase :List[Any] = torch.Size((1, 256, vocab_size))
self.assertEqual(output.shape , __lowercase)
__UpperCamelCase :Optional[Any] = torch.tensor(
[[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1E-4))
@slow
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :Dict = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''')
__UpperCamelCase :Optional[int] = torch.arange(4_096).unsqueeze(0)
with torch.no_grad():
__UpperCamelCase :Tuple = model(__lowercase)[0]
__UpperCamelCase :Optional[int] = 50_265
__UpperCamelCase :Optional[Any] = torch.Size((1, 4_096, vocab_size))
self.assertEqual(output.shape , __lowercase)
__UpperCamelCase :List[str] = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1E-4))
| 452
| 0
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowercase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Any=13 , __UpperCAmelCase : Optional[Any]=30 , __UpperCAmelCase : int=2 , __UpperCAmelCase : List[str]=3 , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Union[str, Any]=32 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : List[Any]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Tuple="gelu" , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : Optional[Any]=10 , __UpperCAmelCase : str=0.02 , __UpperCAmelCase : List[str]=3 , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : str=2 , ) ->int:
"""simple docstring"""
a = parent
a = batch_size
a = image_size
a = patch_size
a = num_channels
a = is_training
a = use_labels
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = type_sequence_label_size
a = initializer_range
a = scope
a = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
a = (image_size // patch_size) ** 2
a = num_patches + 2
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple ) ->Union[str, Any]:
"""simple docstring"""
a = TFDeiTModel(config=_a )
a = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ) ->List[Any]:
"""simple docstring"""
a = TFDeiTForMaskedImageModeling(config=_a )
a = model(_a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
a = 1
a = TFDeiTForMaskedImageModeling(_a )
a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a = model(_a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str ) ->Any:
"""simple docstring"""
a = self.type_sequence_label_size
a = TFDeiTForImageClassification(_a )
a = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a = 1
a = TFDeiTForImageClassification(_a )
a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowercase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__snake_case = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def __lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
a = TFDeiTModelTester(self )
a = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , tf.keras.layers.Dense ) )
def __lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(_a )
a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_a )
def __lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Tuple=False ) ->Union[str, Any]:
"""simple docstring"""
a = super()._prepare_for_class(_a , _a , return_labels=_a )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __lowerCAmelCase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = TFDeiTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _a ( ) -> Union[str, Any]:
a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self : List[str] ) ->Optional[int]:
"""simple docstring"""
a = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
a = self.default_image_processor
a = prepare_img()
a = image_processor(images=_a , return_tensors='''tf''' )
# forward pass
a = model(**_a )
# verify the logits
a = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
a = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 117
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Optional[int] , _a : Union[str, Any] , _a : List[str] , _a : List[Any]=None , _a : Optional[Any]=None ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.layer[current_layer](_a , _a , head_mask[current_layer] )
_SCREAMING_SNAKE_CASE =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : List[str] , _a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =BertEncoderWithPabee(_a )
self.init_weights()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : List[str] , _a : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =threshold
def __UpperCamelCase ( self : Dict , _a : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =patience
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.inference_layers_num / self.inference_instances_num
_SCREAMING_SNAKE_CASE =(
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(_a )
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[Any] , _a : Optional[Any]=None , _a : Optional[int]=None , _a : Any=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : str=None , _a : Any=None , _a : str=None , _a : Optional[Any]=None , _a : Dict=False , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
_SCREAMING_SNAKE_CASE =input_ids.size()
elif inputs_embeds is not None:
_SCREAMING_SNAKE_CASE =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
_SCREAMING_SNAKE_CASE =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
if token_type_ids is None:
_SCREAMING_SNAKE_CASE =torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_SCREAMING_SNAKE_CASE =self.get_extended_attention_mask(_a , _a , _a )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =encoder_hidden_states.size()
_SCREAMING_SNAKE_CASE =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
_SCREAMING_SNAKE_CASE =self.invert_attention_mask(_a )
else:
_SCREAMING_SNAKE_CASE =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_SCREAMING_SNAKE_CASE =self.get_head_mask(_a , self.config.num_hidden_layers )
_SCREAMING_SNAKE_CASE =self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
_SCREAMING_SNAKE_CASE =embedding_output
if self.training:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
_SCREAMING_SNAKE_CASE =self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_SCREAMING_SNAKE_CASE =self.pooler(encoder_outputs[0] )
_SCREAMING_SNAKE_CASE =[output_layers[self.config.num_hidden_layers - 1](_a )]
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](_a )
if regression:
_SCREAMING_SNAKE_CASE =logits.detach()
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
else:
_SCREAMING_SNAKE_CASE =logits.detach().argmax(dim=1 )
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =logits
if patient_counter == self.patience:
break
_SCREAMING_SNAKE_CASE =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : Optional[int] , _a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =config.num_labels
_SCREAMING_SNAKE_CASE =BertModelWithPabee(_a )
_SCREAMING_SNAKE_CASE =nn.Dropout(config.hidden_dropout_prob )
_SCREAMING_SNAKE_CASE =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[str] , _a : Optional[Any]=None , _a : List[Any]=None , _a : Union[str, Any]=None , _a : List[str]=None , _a : Dict=None , _a : Optional[Any]=None , _a : Optional[Any]=None , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.bert(
input_ids=_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_SCREAMING_SNAKE_CASE =(logits[-1],)
if labels is not None:
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
_SCREAMING_SNAKE_CASE =MSELoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE =CrossEntropyLoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_SCREAMING_SNAKE_CASE =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_SCREAMING_SNAKE_CASE =(total_loss / total_weights,) + outputs
return outputs
| 691
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
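# Swapping the module object in sys.modules for a _LazyModule defers the heavy
# torch/TF imports above until one of the listed attributes is first accessed.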
| 384
|
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def _UpperCamelCase ( __UpperCamelCase ) -> Optional[int]:
print('Loading config file...' )
def flatten_yaml_as_dict(__UpperCamelCase ,__UpperCamelCase="" ,__UpperCamelCase="." ):
lowerCamelCase_ = []
for k, v in d.items():
lowerCamelCase_ = parent_key + sep + k if parent_key else k
if isinstance(__UpperCamelCase ,collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(__UpperCamelCase ,__UpperCamelCase ,sep=__UpperCamelCase ).items() )
else:
items.append((new_key, v) )
return dict(__UpperCamelCase )
lowerCamelCase_ = argparse.Namespace()
with open(__UpperCamelCase ,'r' ) as yaml_file:
try:
lowerCamelCase_ = yaml.load(__UpperCamelCase ,Loader=yaml.FullLoader )
lowerCamelCase_ = flatten_yaml_as_dict(__UpperCamelCase )
for k, v in flat_cfg.items():
setattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(__UpperCamelCase ,str(__UpperCamelCase ) ) )
return config
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
lowerCamelCase_ = MobileViTVaConfig()
lowerCamelCase_ = False
# dataset
if task_name.startswith('imagenet1k_' ):
lowerCamelCase_ = 10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
lowerCamelCase_ = 3_84
else:
lowerCamelCase_ = 2_56
lowerCamelCase_ = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
lowerCamelCase_ = 2_10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
lowerCamelCase_ = 3_84
else:
lowerCamelCase_ = 2_56
lowerCamelCase_ = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
lowerCamelCase_ = 1_51
lowerCamelCase_ = 5_12
lowerCamelCase_ = 'ade20k-id2label.json'
lowerCamelCase_ = True
elif task_name.startswith('voc_' ):
lowerCamelCase_ = 21
lowerCamelCase_ = 5_12
lowerCamelCase_ = 'pascal-voc-id2label.json'
lowerCamelCase_ = True
# orig_config
lowerCamelCase_ = load_orig_config_file(__UpperCamelCase )
assert getattr(__UpperCamelCase ,'model.classification.name' ,-1 ) == "mobilevit_v2", "Invalid model"
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.classification.mitv2.width_multiplier' ,1.0 )
assert (
getattr(__UpperCamelCase ,'model.classification.mitv2.attn_norm_layer' ,-1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.classification.activation.name' ,'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.output_stride' ,16 )
if "_deeplabv3" in task_name:
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_rates' ,[12, 24, 36] )
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_out_channels' ,5_12 )
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_dropout' ,0.1 )
# id2label
lowerCamelCase_ = 'huggingface/label-files'
lowerCamelCase_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='dataset' ) ,'r' ) )
lowerCamelCase_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
lowerCamelCase_ = dct.pop(__UpperCamelCase )
lowerCamelCase_ = val
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase=False ) -> int:
if base_model:
lowerCamelCase_ = ''
else:
lowerCamelCase_ = 'mobilevitv2.'
lowerCamelCase_ = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowerCamelCase_ = k[8:]
else:
lowerCamelCase_ = k
if ".block." in k:
lowerCamelCase_ = k_new.replace('.block.' ,'.' )
if ".conv." in k:
lowerCamelCase_ = k_new.replace('.conv.' ,'.convolution.' )
if ".norm." in k:
lowerCamelCase_ = k_new.replace('.norm.' ,'.normalization.' )
if "conv_1." in k:
lowerCamelCase_ = k_new.replace('conv_1.' ,f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.''' ,f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
lowerCamelCase_ = k_new.replace('.exp_1x1.' ,'.expand_1x1.' )
if ".red_1x1." in k:
lowerCamelCase_ = k_new.replace('.red_1x1.' ,'.reduce_1x1.' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.0.''' ,f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.local_rep.0.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.local_rep.1.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
lowerCamelCase_ = [0, 1]
elif i == 4:
lowerCamelCase_ = [0, 1, 2, 3]
elif i == 5:
lowerCamelCase_ = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
lowerCamelCase_ = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' ,f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
lowerCamelCase_ = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' ,f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.conv_proj.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
lowerCamelCase_ = k_new.replace('pre_norm_attn.0.' ,'layernorm_before.' )
if "pre_norm_attn.1." in k:
lowerCamelCase_ = k_new.replace('pre_norm_attn.1.' ,'attention.' )
if "pre_norm_ffn.0." in k:
lowerCamelCase_ = k_new.replace('pre_norm_ffn.0.' ,'layernorm_after.' )
if "pre_norm_ffn.1." in k:
lowerCamelCase_ = k_new.replace('pre_norm_ffn.1.' ,'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
lowerCamelCase_ = k_new.replace('pre_norm_ffn.3.' ,'ffn.conv2.' )
if "classifier.1." in k:
lowerCamelCase_ = k_new.replace('classifier.1.' ,'classifier.' )
if "seg_head." in k:
lowerCamelCase_ = k_new.replace('seg_head.' ,'segmentation_head.' )
if ".aspp_layer." in k:
lowerCamelCase_ = k_new.replace('.aspp_layer.' ,'.' )
if ".aspp_pool." in k:
lowerCamelCase_ = k_new.replace('.aspp_pool.' ,'.' )
rename_keys.append((k, k_new) )
return rename_keys
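# Illustrative effect of the rename map above (an assumed example, not captured from
# a real checkpoint): with model_prefix == "mobilevitv2.", the stem key
#   "conv_1.block.conv.weight"
# is rewritten step by step (.block. -> ., .conv. -> .convolution., conv_1. -> conv_stem.)
# into
#   "mobilevitv2.conv_stem.convolution.weight"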
def _UpperCamelCase ( __UpperCamelCase ) -> Dict:
lowerCamelCase_ = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(__UpperCamelCase )
for k in keys_to_ignore:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
def _UpperCamelCase ( ) -> Any:
lowerCamelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowerCamelCase_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]:
lowerCamelCase_ = get_mobilevitva_config(__UpperCamelCase ,__UpperCamelCase )
# load original state_dict
lowerCamelCase_ = torch.load(__UpperCamelCase ,map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
lowerCamelCase_ = MobileViTVaForSemanticSegmentation(__UpperCamelCase ).eval()
lowerCamelCase_ = False
else:
lowerCamelCase_ = MobileViTVaForImageClassification(__UpperCamelCase ).eval()
lowerCamelCase_ = False
    # remove and rename some keys of the loaded original model
lowerCamelCase_ = checkpoint
remove_unused_keys(__UpperCamelCase )
lowerCamelCase_ = create_rename_keys(__UpperCamelCase ,base_model=__UpperCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# load modified state_dict
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase_ = MobileViTImageProcessor(crop_size=config.image_size ,size=config.image_size + 32 )
lowerCamelCase_ = image_processor(images=prepare_img() ,return_tensors='pt' )
lowerCamelCase_ = model(**__UpperCamelCase )
# verify classification model
if task_name.startswith('imagenet' ):
lowerCamelCase_ = outputs.logits
lowerCamelCase_ = logits.argmax(-1 ).item()
print('Predicted class:' ,model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowerCamelCase_ = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1e-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
A_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
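# Example invocation (a sketch; the script filename and the checkpoint/config paths
# are placeholders for files shipped with the original MobileViTv2 release, not part
# of this script):
#
#   python convert_mlcvnets_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256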
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __UpperCAmelCase ( __a : List[str] ,__a : Dict ) -> List[str]:
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_a : Tuple = flax_key_tuple[:-1] + ('''weight''',)
_a : Union[str, Any] = torch.permute(__a ,(0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__a ):
# linear layer
_a : Optional[Any] = flax_key_tuple[:-1] + ('''weight''',)
_a : Any = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_a : Tuple = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
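# Sketch of what rename_base_flax_keys does (illustrative shapes, not from a real
# checkpoint): a 3D "kernel" is treated as an expert layer and permuted, a 2D "kernel"
# is transposed like a dense layer, and scale/embedding entries are only renamed.
#
#   key, t = rename_base_flax_keys(("mlp", "wi", "kernel"), torch.zeros(128, 512))
#   # -> key == ("mlp", "wi", "weight"), t.shape == torch.Size([512, 128])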
def __UpperCAmelCase ( __a : int ,__a : List[str] ,__a : List[str] ) -> Optional[int]:
"""simple docstring"""
if "metadata" in layer:
_a : Optional[Any] = layer.split('''metadata''' )
_a : Optional[int] = ''''''.join(split_layer[0] )[:-1]
_a : Dict = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
_a : List[Any] = layer.split('''kvstore''' )
_a : List[str] = ''''''.join(split_layer[0] )[:-1]
_a : str = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
_a : Tuple = layer.split('''/''' )
_a : Any = '''/'''.join(split_layer[:-1] )
_a : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_a : Tuple = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
_a : str = '''file'''
else:
_a : Optional[Any] = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __UpperCAmelCase ( __a : Optional[Any] ,__a : int ) -> Optional[Any]:
"""simple docstring"""
_a : int = rename_keys(__a )
_a : Any = {}
for k, v in current_block.items():
_a : Dict = v
_a : int = new_current_block
torch.save(__a ,__a )
def __UpperCAmelCase ( __a : List[Any] ,__a : int ,__a : str ,__a : int ,__a : str = WEIGHTS_NAME ) -> Any:
"""simple docstring"""
_a : Optional[Any] = convert_file_size_to_int(__a )
_a : List[str] = []
_a : List[Any] = {}
_a : int = 0
_a : List[Any] = 0
os.makedirs(__a ,exist_ok=__a )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' ,'''rb''' ) as fp:
_a : List[Any] = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
_a : Union[str, Any] = flatten_dict(__a ,sep='''/''' )
_a : Optional[int] = {}
for layer in checkpoint_info.keys():
_a , _a , _a : int = get_key_and_tensorstore_dict(
__a ,__a ,__a )
if curr_real_layer_name in all_layers:
_a : Any = content
else:
_a : Dict = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_a : Optional[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_a : Optional[Any] = torch.tensor(__a )
_a : List[str] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_a , _a : str = rename_base_flax_keys(tuple(key.split('''/''' ) ) ,__a )
_a : int = '''/'''.join(__a )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_a : Union[str, Any] = os.path.join(
__a ,weights_name.replace('''.bin''' ,F"""-{len(__a )+1:05d}-of-???.bin""" ) )
rename_and_save_block(__a ,__a )
sharded_state_dicts.append(current_block.keys() )
del current_block
_a : List[Any] = {}
_a : Optional[int] = 0
_a : Dict = raw_weights.to(getattr(__a ,__a ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_a : Dict = os.path.join(__a ,weights_name.replace('''.bin''' ,F"""-{len(__a )+1:05d}-of-???.bin""" ) )
rename_and_save_block(__a ,__a )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__a ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_a : int = {}
_a : Any = {}
for idx, shard in enumerate(__a ):
_a : int = weights_name.replace(
'''.bin''' ,F"""-{idx+1:05d}-of-{len(__a ):05d}.bin""" ) # len(sharded_state_dicts):05d}
_a : Optional[Any] = os.path.join(__a ,weights_name.replace('''.bin''' ,F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(__a ,os.path.join(__a ,__a ) )
_a : Optional[Any] = shard
for key in shard:
_a : str = shard_file
# Add the metadata
_a : str = {'''total_size''': total_size}
_a : str = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(__a ,__a ) ,'''w''' ,encoding='''utf-8''' ) as f:
_a : Optional[int] = json.dumps(__a ,indent=2 ,sort_keys=__a ) + '''\n'''
f.write(__a )
return metadata, index
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
a__ = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
_a : Optional[Any] = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
_a : Tuple = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' ,device_map='''auto''' )
    _a : Any = T5Tokenizer.from_pretrained('''t5-small''' )
_a : Optional[int] = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
_a : str = tokenizer(__a ,return_tensors='''pt''' ).input_ids
_a : Any = model.generate(__a ,decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
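# Example invocation of the sharding entry point (a sketch; the argparse defaults
# above point at Google-internal disk paths that will not exist locally, and the
# script filename is assumed from its upstream location):
#
#   python convert_big_switch.py \
#       --switch_t5x_checkpoint_path /path/to/checkpoint_634600 \
#       --pytorch_dump_folder_path ./switch-xxl-128-converted \
#       --max_shard_size 10GB --dtype bfloat16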
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase ( __a : Tuple ,__a : Dict ,__a : List[str] ,__a : Optional[Any] ,__a : Tuple ) -> Dict:
"""simple docstring"""
with open(__a ) as metadata_file:
_a : Optional[Any] = json.load(__a )
_a : List[Any] = LukeConfig(use_entity_aware_attention=__a ,**metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_a : Optional[Any] = torch.load(__a ,map_location='''cpu''' )['''module''']
# Load the entity vocab file
_a : Any = load_original_entity_vocab(__a )
# add an entry for [MASK2]
_a : Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_a : Dict = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_a : Optional[int] = AddedToken('''<ent>''' ,lstrip=__a ,rstrip=__a )
_a : Tuple = AddedToken('''<ent2>''' ,lstrip=__a ,rstrip=__a )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__a )
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''r''' ) as f:
_a : List[str] = json.load(__a )
_a : Tuple = '''MLukeTokenizer'''
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''w''' ) as f:
json.dump(__a ,__a )
with open(os.path.join(__a ,MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) ,'''w''' ) as f:
json.dump(__a ,__a )
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
# Initialize the embeddings of the special tokens
_a : str = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_a : Tuple = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_a : Any = state_dict['''embeddings.word_embeddings.weight''']
_a : Optional[int] = word_emb[ent_init_index].unsqueeze(0 )
_a : Any = word_emb[enta_init_index].unsqueeze(0 )
_a : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_a : Tuple = state_dict[bias_name]
_a : Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_a : Optional[int] = decoder_bias[enta_init_index].unsqueeze(0 )
_a : Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_a : Tuple = F"""encoder.layer.{layer_index}.attention.self."""
_a : List[Any] = state_dict[prefix + matrix_name]
_a : Dict = state_dict[prefix + matrix_name]
_a : List[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_a : Union[str, Any] = state_dict['''entity_embeddings.entity_embeddings.weight''']
_a : Optional[int] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_a : int = state_dict['''entity_predictions.bias''']
_a : int = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Optional[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_a : Optional[int] = LukeForMaskedLM(config=__a ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_a : int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_a : Optional[int] = state_dict[key]
else:
_a : Tuple = state_dict[key]
_a , _a : int = model.load_state_dict(__a ,strict=__a )
if set(__a ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__a ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a ,task='''entity_classification''' )
_a : int = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
_a : List[Any] = (0, 9)
_a : Tuple = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : int = model(**__a )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : List[str] = torch.Size((1, 33, 768) )
_a : Union[str, Any] = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : str = torch.Size((1, 1, 768) )
_a : List[Any] = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
_a : Dict = '''Tokyo is the capital of <mask>.'''
_a : List[str] = (24, 30)
_a : Optional[int] = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : Optional[Any] = model(**__a )
_a : Any = encoding['''input_ids'''][0].tolist()
_a : Optional[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_a : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__a )
_a : Any = outputs.entity_logits[0][0].argmax().item()
_a : Optional[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__a ) )
model.save_pretrained(__a )
def __UpperCAmelCase ( __a : List[Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
_a : int = [json.loads(__a ) for line in open(__a )]
_a : List[Any] = {}
for entry in data:
_a : int = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_a : List[Any] = entity_id
break
_a : Dict = F"""{language}:{entity_name}"""
_a : int = entity_id
return new_mapping
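# Sketch of the entity-vocab JSONL format the loader above expects (a minimal assumed
# example; real files ship with the original mLUKE checkpoints):
#
#   {"id": 0, "entities": [["[PAD]", null]]}
#   {"id": 1, "entities": [["Japan", "en"], ["日本", "ja"]]}
#
# which this function maps to {"[PAD]": 0, "en:Japan": 1, "ja:日本": 1}.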
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
a__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : int = logging.get_logger(__name__)
lowercase_ : Dict = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''rwkv'''
__A = {'''max_position_embeddings''': '''context_length'''}
def __init__( self , _lowerCAmelCase=5_0277 , _lowerCAmelCase=1024 , _lowerCAmelCase=4096 , _lowerCAmelCase=32 , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0 , _lowerCAmelCase=0 , _lowerCAmelCase=6 , _lowerCAmelCase=False , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Union[str, Any]:
'''simple docstring'''
lowercase = vocab_size
lowercase = context_length
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowercase = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowercase = layer_norm_epsilon
lowercase = rescale_every
lowercase = use_cache
lowercase = bos_token_id
lowercase = eos_token_id
super().__init__(
tie_word_embeddings=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
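# Minimal usage sketch (assumption: the class above mirrors transformers.RwkvConfig,
# whose name the obfuscation hides, so the calls are written against the upstream API):
#
#   from transformers import RwkvConfig
#   config = RwkvConfig(vocab_size=50277, context_length=1024, hidden_size=768)
#   config.save_pretrained("./rwkv-config")            # writes config.json
#   reloaded = RwkvConfig.from_pretrained("./rwkv-config")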
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[int] ):
lowercase = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
lowercase = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowercase = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowercase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowercase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowercase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowercase = tf.placeholder("""float64""" , [dim] )
lowercase = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowercase = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowercase = tf.placeholder("""int32""" )
lowercase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowercase = tf.placeholder("""float""" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowercase = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowercase = tf.placeholder("""float""" , [dim] )
lowercase = tf.placeholder("""float""" , [dim] )
        lowercase = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowercase_ , lowercase_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowercase = tf.placeholder("""float""" , [noofclusters] )
lowercase = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowercase = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowercase = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
lowercase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
lowercase = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowercase = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
lowercase = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowercase = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowercase = sess.run(lowercase_ )
lowercase = sess.run(lowercase_ )
return centroids, assignments
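# Usage sketch (assumptions: a TensorFlow 1.x runtime, since the graph relies on
# tf.placeholder/tf.Session, and the upstream name TFKMeansCluster for the obfuscated
# function above):
#
#   vectors = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [8.5, 9.5]])
#   centroids, assignments = TFKMeansCluster(vectors, 2)
#   # with data this well separated, the first two and last two vectors should land
#   # in different clusters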
from math import sqrt
def _a ( a :int ) -> bool:
assert isinstance(snake_case_ , snake_case_ ) and (
number >= 0
), "'number' must been an int and positive"
a = True
# 0 and 1 are none primes.
if number <= 1:
a = False
for divisor in range(2 , int(round(sqrt(snake_case_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
a = False
break
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'status' must been from type bool"
return status
def _a ( a :Tuple ) -> int:
assert isinstance(snake_case_ , snake_case_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
a = list(range(2 , n + 1 ) )
a = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(snake_case_ ) ):
for j in range(i + 1 , len(snake_case_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
a = 0
# filters actual prime numbers.
a = [x for x in begin_list if x != 0]
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type list"
return ans
def _a ( a :List[Any] ) -> List[str]:
assert isinstance(snake_case_ , snake_case_ ) and (n > 2), "'N' must been an int and > 2"
a = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(snake_case_ ):
ans.append(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type list"
return ans
def _a ( a :Dict ) -> List[Any]:
assert isinstance(snake_case_ , snake_case_ ) and number >= 0, "'number' must been an int and >= 0"
a = [] # this list will be returns of the function.
# potential prime number factors.
a = 2
a = number
if number == 0 or number == 1:
ans.append(snake_case_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(snake_case_ ):
while quotient != 1:
if is_prime(snake_case_ ) and (quotient % factor == 0):
ans.append(snake_case_ )
quotient /= factor
else:
factor += 1
else:
ans.append(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type list"
return ans
def _a ( a :Optional[int] ) -> int:
assert isinstance(snake_case_ , snake_case_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
a = 0
# prime factorization of 'number'
a = prime_factorization(snake_case_ )
a = max(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type int"
return ans
def _a ( a :Optional[Any] ) -> List[Any]:
assert isinstance(snake_case_ , snake_case_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
a = 0
# prime factorization of 'number'
a = prime_factorization(snake_case_ )
a = min(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type int"
return ans
def _a ( a :int ) -> Optional[Any]:
assert isinstance(snake_case_ , snake_case_ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , snake_case_ ), "compare must been from type bool"
return number % 2 == 0
def _a ( a :Optional[int] ) -> Any:
assert isinstance(snake_case_ , snake_case_ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , snake_case_ ), "compare must been from type bool"
return number % 2 != 0
def _a ( a :str ) -> Tuple:
assert (
isinstance(snake_case_ , snake_case_ ) and (number > 2) and is_even(snake_case_ )
), "'number' must been an int, even and > 2"
a = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
a = get_prime_numbers(snake_case_ )
a = len(snake_case_ )
# run variable for while-loops.
a = 0
a = None
# exit variable. for break up the loops
a = True
while i < len_pn and loop:
a = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
a = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(snake_case_ , snake_case_ )
and (len(snake_case_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _a ( a :Tuple , a :List[Any] ) -> Optional[Any]:
assert (
isinstance(snake_case_ , snake_case_ )
and isinstance(snake_case_ , snake_case_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
a = 0
while numbera != 0:
a = numbera % numbera
a = numbera
a = rest
# precondition
assert isinstance(snake_case_ , snake_case_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
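# Worked trace of the Euclidean algorithm the helper above implements (the collapsed
# `a = ...` assignments stand in for the rest/number updates; values worked by hand):
#
#   gcd(42, 56): rest = 42 % 56 = 42 -> (56, 42)
#                rest = 56 % 42 = 14 -> (42, 14)
#                rest = 42 % 14 = 0  -> (14, 0)   => returns 14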
def _a ( a :Optional[Any] , a :str ) -> Optional[Any]:
assert (
isinstance(snake_case_ , snake_case_ )
and isinstance(snake_case_ , snake_case_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
a = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
a = prime_factorization(snake_case_ )
a = prime_factorization(snake_case_ )
elif numbera == 1 or numbera == 1:
a = []
a = []
a = max(snake_case_ , snake_case_ )
a = 0
a = 0
a = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
a = prime_fac_a.count(snake_case_ )
a = prime_fac_a.count(snake_case_ )
for _ in range(max(snake_case_ , snake_case_ ) ):
ans *= n
else:
a = prime_fac_a.count(snake_case_ )
for _ in range(snake_case_ ):
ans *= n
done.append(snake_case_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
a = prime_fac_a.count(snake_case_ )
for _ in range(snake_case_ ):
ans *= n
done.append(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _a ( a :Tuple ) -> Optional[int]:
assert isinstance(snake_case_ , snake_case_ ) and (n >= 0), "'number' must been a positive int"
a = 0
a = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(snake_case_ ):
ans += 1
# precondition
assert isinstance(snake_case_ , snake_case_ ) and is_prime(
snake_case_ ), "'ans' must been a prime number and from type int"
return ans
def _a ( a :int , a :List[str] ) -> Dict:
assert (
is_prime(snake_case_ ) and is_prime(snake_case_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
a = p_number_a + 1 # jump to the next number
a = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(snake_case_ ):
number += 1
while number < p_number_a:
ans.append(snake_case_ )
number += 1
# fetch the next prime number.
while not is_prime(snake_case_ ):
number += 1
# precondition
assert (
isinstance(snake_case_ , snake_case_ )
and ans[0] != p_number_a
and ans[len(snake_case_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _a ( a :Tuple ) -> Tuple:
assert isinstance(snake_case_ , snake_case_ ) and (n >= 1), "'n' must been int and >= 1"
a = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(snake_case_ )
# precondition
    assert ans[0] == 1 and ans[len(snake_case_ ) - 1] == n, "Error in function getDivisors(...)"
return ans
def _a ( a :Union[str, Any] ) -> List[Any]:
assert isinstance(snake_case_ , snake_case_ ) and (
number > 1
), "'number' must been an int and >= 1"
a = get_divisors(snake_case_ )
# precondition
assert (
isinstance(snake_case_ , snake_case_ )
and (divisors[0] == 1)
and (divisors[len(snake_case_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _a ( a :int , a :Optional[int] ) -> Optional[Any]:
assert (
isinstance(snake_case_ , snake_case_ )
and isinstance(snake_case_ , snake_case_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
a = gcd(abs(snake_case_ ) , abs(snake_case_ ) )
# precondition
assert (
isinstance(snake_case_ , snake_case_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _a ( a :int ) -> Optional[Any]:
assert isinstance(snake_case_ , snake_case_ ) and (n >= 0), "'n' must been a int and >= 0"
a = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _a ( a :Optional[Any] ) -> str:
assert isinstance(snake_case_ , snake_case_ ) and (n >= 0), "'n' must been an int and >= 0"
a = 0
a = 1
a = 1 # this will be return
for _ in range(n - 1 ):
a = ans
ans += fiba
a = tmp
return ans
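# Usage sketch for this prime-number library (assumption: the obfuscated `_a`
# definitions above correspond to primelib's is_prime, prime_factorization, goldbach,
# gcd and kg_v, whose names their bodies already reference; values worked by hand):
#
#   is_prime(97)               # -> True
#   prime_factorization(360)   # -> [2, 2, 2, 3, 3, 5]
#   goldbach(28)               # -> [5, 23], since 5 + 23 == 28 and both are prime
#   gcd(42, 56)                # -> 14
#   kg_v(8, 10)                # -> 40 (least common multiple)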
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : int = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'wav2vec2'
def __init__(self , lowerCamelCase=32 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3_072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase="group" , lowerCamelCase="gelu" , lowerCamelCase=(512, 512, 512, 512, 512, 512, 512) , lowerCamelCase=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase=False , lowerCamelCase=128 , lowerCamelCase=16 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=0.05 , lowerCamelCase=10 , lowerCamelCase=2 , lowerCamelCase=0.0 , lowerCamelCase=10 , lowerCamelCase=0 , lowerCamelCase=320 , lowerCamelCase=2 , lowerCamelCase=0.1 , lowerCamelCase=100 , lowerCamelCase=256 , lowerCamelCase=256 , lowerCamelCase=0.1 , lowerCamelCase="sum" , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=256 , lowerCamelCase=(512, 512, 512, 512, 1_500) , lowerCamelCase=(5, 3, 3, 1, 1) , lowerCamelCase=(1, 2, 3, 1, 1) , lowerCamelCase=512 , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=False , lowerCamelCase=3 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase , ):
'''simple docstring'''
super().__init__(**lowerCamelCase , pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase )
_lowerCAmelCase = hidden_size
_lowerCAmelCase = feat_extract_norm
_lowerCAmelCase = feat_extract_activation
_lowerCAmelCase = list(lowerCamelCase )
_lowerCAmelCase = list(lowerCamelCase )
_lowerCAmelCase = list(lowerCamelCase )
_lowerCAmelCase = conv_bias
_lowerCAmelCase = num_conv_pos_embeddings
_lowerCAmelCase = num_conv_pos_embedding_groups
_lowerCAmelCase = len(self.conv_dim )
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = feat_proj_dropout
_lowerCAmelCase = final_dropout
_lowerCAmelCase = layerdrop
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = initializer_range
_lowerCAmelCase = vocab_size
_lowerCAmelCase = do_stable_layer_norm
_lowerCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase = apply_spec_augment
_lowerCAmelCase = mask_time_prob
_lowerCAmelCase = mask_time_length
_lowerCAmelCase = mask_time_min_masks
_lowerCAmelCase = mask_feature_prob
_lowerCAmelCase = mask_feature_length
_lowerCAmelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase = num_codevectors_per_group
_lowerCAmelCase = num_codevector_groups
_lowerCAmelCase = contrastive_logits_temperature
_lowerCAmelCase = feat_quantizer_dropout
_lowerCAmelCase = num_negatives
_lowerCAmelCase = codevector_dim
_lowerCAmelCase = proj_codevector_dim
_lowerCAmelCase = diversity_loss_weight
# ctc loss
_lowerCAmelCase = ctc_loss_reduction
_lowerCAmelCase = ctc_zero_infinity
# adapter
_lowerCAmelCase = add_adapter
_lowerCAmelCase = adapter_kernel_size
_lowerCAmelCase = adapter_stride
_lowerCAmelCase = num_adapter_layers
_lowerCAmelCase = output_hidden_size or hidden_size
_lowerCAmelCase = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase = list(lowerCamelCase )
_lowerCAmelCase = list(lowerCamelCase )
_lowerCAmelCase = list(lowerCamelCase )
_lowerCAmelCase = xvector_output_dim
@property
def A__ (self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
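# Usage sketch (assumption: this mirrors transformers.Wav2Vec2Config; the property
# above is inputs_to_logits_ratio upstream, the cumulative stride of the feature
# extractor):
#
#   from transformers import Wav2Vec2Config
#   cfg = Wav2Vec2Config()
#   cfg.inputs_to_logits_ratio   # 5 * 2**6 == 320 input samples per logit frame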
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)
def main():
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f"| 0 | 0 | {nor_gate(0 , 0 )} |" )
print(f"| 0 | 1 | {nor_gate(0 , 1 )} |" )
print(f"| 1 | 0 | {nor_gate(1 , 0 )} |" )
print(f"| 1 | 1 | {nor_gate(1 , 1 )} |" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
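# NOR is functionally complete; as a small sketch (not part of the original script),
# NOT can be expressed through it:
#
#   not_gate = lambda x: nor_gate(x, x)   # not_gate(0) == 1, not_gate(1) == 0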
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase__ =logging.get_logger(__name__)
lowercase__ ={
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class UpperCamelCase__ ( __lowercase ,__lowercase ):
_SCREAMING_SNAKE_CASE : str = "focalnet"
def __init__(self : str , snake_case_ : List[str]=2_2_4 , snake_case_ : Dict=4 , snake_case_ : Union[str, Any]=3 , snake_case_ : int=9_6 , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=[1_9_2, 3_8_4, 7_6_8, 7_6_8] , snake_case_ : int=[2, 2, 6, 2] , snake_case_ : List[str]=[2, 2, 2, 2] , snake_case_ : Any=[3, 3, 3, 3] , snake_case_ : Optional[Any]="gelu" , snake_case_ : Optional[int]=4.0 , snake_case_ : List[str]=0.0 , snake_case_ : Optional[int]=0.1 , snake_case_ : Optional[Any]=False , snake_case_ : int=1E-4 , snake_case_ : Dict=False , snake_case_ : Optional[Any]=False , snake_case_ : List[str]=False , snake_case_ : str=0.02 , snake_case_ : List[str]=1E-5 , snake_case_ : Tuple=3_2 , snake_case_ : str=None , snake_case_ : Dict=None , **snake_case_ : Tuple , ):
super().__init__(**snake_case_ )
__a : Tuple = image_size
__a : List[str] = patch_size
__a : List[str] = num_channels
__a : Any = embed_dim
__a : List[Any] = use_conv_embed
__a : Any = hidden_sizes
__a : Optional[Any] = depths
__a : Tuple = focal_levels
__a : Optional[Any] = focal_windows
__a : List[Any] = hidden_act
__a : Dict = mlp_ratio
__a : Tuple = hidden_dropout_prob
__a : List[Any] = drop_path_rate
__a : List[Any] = use_layerscale
__a : Optional[int] = layerscale_value
__a : Any = use_post_layernorm
__a : List[Any] = use_post_layernorm_in_modulation
__a : Any = normalize_modulator
__a : List[Any] = initializer_range
__a : List[Any] = layer_norm_eps
__a : List[Any] = encoder_stride
__a : Union[str, Any] = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
__a , __a : List[str] = get_aligned_output_features_output_indices(
out_features=snake_case_ , out_indices=snake_case_ , stage_names=self.stage_names )
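# Usage sketch (assumption: the class above mirrors transformers.FocalNetConfig,
# which doubles as a backbone config via out_features):
#
#   from transformers import FocalNetConfig
#   cfg = FocalNetConfig(out_features=["stage1", "stage2"])
#   cfg.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']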
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in the sentence that is longer than 4 characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
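# Worked example for the helper above: words longer than 4 characters are reversed,
# shorter ones kept as-is.
#
#   reverse_long_words("Hey wollef sroirraw")   # -> "Hey fellow warriors"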
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=__snake_case ):
"""simple docstring"""
lowerCAmelCase = ['note_seq']
def __init__( self , *a__ , **a__ ) -> Optional[int]:
requires_backends(self , ["""note_seq"""] )
@classmethod
def _UpperCAmelCase ( cls , *a__ , **a__ ) -> Dict:
requires_backends(cls , ["""note_seq"""] )
@classmethod
def _UpperCAmelCase ( cls , *a__ , **a__ ) -> Optional[int]:
requires_backends(cls , ["""note_seq"""] )
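# How such dummy objects are used (a sketch; the obfuscated class above stands in for
# a note_seq-gated class such as diffusers' MidiProcessor): the stub is importable
# even without note_seq installed, and only raises at use time:
#
#   obj = MidiProcessor()   # ImportError: MidiProcessor requires the note_seq library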
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: List[str] = logging.get_logger(__name__)
A__: Optional[Any] = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : int = "trocr"
__UpperCamelCase : int = ["past_key_values"]
__UpperCamelCase : List[str] = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Optional[Any]=5_0_2_6_5 , SCREAMING_SNAKE_CASE :Tuple=1_0_2_4 , SCREAMING_SNAKE_CASE :Optional[Any]=1_2 , SCREAMING_SNAKE_CASE :Optional[int]=1_6 , SCREAMING_SNAKE_CASE :Any=4_0_9_6 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Tuple=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE :List[Any]=2 , SCREAMING_SNAKE_CASE :Optional[Any]=0.02 , SCREAMING_SNAKE_CASE :Any=0.0 , SCREAMING_SNAKE_CASE :List[Any]=True , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :Tuple=True , SCREAMING_SNAKE_CASE :str=True , SCREAMING_SNAKE_CASE :Tuple=1 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :int=2 , **SCREAMING_SNAKE_CASE :Any , ) -> Any:
'''simple docstring'''
_a : Optional[Any] =vocab_size
_a : Optional[int] =d_model
_a : Union[str, Any] =decoder_layers
_a : Optional[int] =decoder_attention_heads
_a : Any =decoder_ffn_dim
_a : Union[str, Any] =activation_function
_a : Dict =max_position_embeddings
_a : Union[str, Any] =dropout
_a : Union[str, Any] =attention_dropout
_a : int =activation_dropout
_a : Optional[Any] =init_std
_a : Optional[int] =decoder_layerdrop
_a : Optional[Any] =use_cache
_a : Tuple =scale_embedding
_a : int =use_learned_position_embeddings
_a : Union[str, Any] =layernorm_embedding
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
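# Usage sketch (assumption: the class above mirrors transformers.TrOCRConfig, the
# decoder half of TrOCR's vision-encoder/text-decoder model):
#
#   from transformers import TrOCRConfig
#   cfg = TrOCRConfig(d_model=512, decoder_layers=6, decoder_attention_heads=8)
#   cfg.hidden_size   # 512, resolved through the attribute_map above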
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
A__: Any = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
A__: Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Dict ) -> Optional[Any]:
_a : List[str] =state_dict.pop(_UpperCAmelCase )
_a : Tuple =val
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> List[str]:
_a : Optional[Any] =OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_a : List[str] =key.replace("""backbone.0.body""" ,"""backbone.conv_encoder.model""" )
_a : int =value
else:
_a : Any =value
return new_state_dict
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any] ) -> int:
_a : List[str] =""""""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_a : int =state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
_a : Optional[Any] =state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
_a : str =in_proj_weight[:256, :]
_a : List[str] =in_proj_bias[:256]
_a : Optional[int] =in_proj_weight[256:512, :]
_a : List[str] =in_proj_bias[256:512]
_a : Optional[int] =in_proj_weight[-256:, :]
_a : Tuple =in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_a : Tuple =state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
_a : str =state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[int] =in_proj_weight[:256, :]
_a : List[Any] =in_proj_bias[:256]
_a : Tuple =in_proj_weight[256:512, :]
_a : str =in_proj_bias[256:512]
_a : Any =in_proj_weight[-256:, :]
_a : int =in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_a : Any =state_dict.pop(
F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
_a : int =state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_a : int =in_proj_weight_cross_attn[:256, :]
_a : Any =in_proj_bias_cross_attn[:256]
_a : str =in_proj_weight_cross_attn[256:512, :]
_a : Dict =in_proj_bias_cross_attn[256:512]
_a : Any =in_proj_weight_cross_attn[-256:, :]
_a : Union[str, Any] =in_proj_bias_cross_attn[-256:]
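# The slicing above unpacks PyTorch's fused attention projection: nn.MultiheadAttention
# stores q/k/v as a single in_proj matrix of shape (3*d_model, d_model). A minimal
# illustration with d_model = 256 (hypothetical tensors, not checkpoint data):
#
#   in_proj_weight = torch.randn(3 * 256, 256)
#   q_w = in_proj_weight[:256, :]        # query rows
#   k_w = in_proj_weight[256:512, :]     # key rows
#   v_w = in_proj_weight[-256:, :]       # value rows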
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Any ) -> int:
_a , _a : Union[str, Any] =image.size
_a : Dict =max(_UpperCAmelCase ,_UpperCAmelCase )
_a : Union[str, Any] =800 if """detection""" in checkpoint_url else 1000
_a : Any =target_max_size / current_max_size
_a : int =image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Union[str, Any] ) -> int:
_a : Optional[Any] =F.to_tensor(_UpperCAmelCase )
_a : Tuple =F.normalize(_UpperCAmelCase ,mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] ,std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original model's weights into our Table Transformer structure."""
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
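# Example invocation (the script filename below is hypothetical; pass --push_to_hub
# only if you are logged in to the Hugging Face Hub):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection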
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
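# For the sample graph above the articulation points are 2, 3 and 5
# (removing 2 cuts off {0, 1}, removing 3 isolates 4, removing 5 cuts off {6, 7, 8}),
# so compute_ap(data) prints 2, 3 and 5.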
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
"""simple docstring"""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
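# ProteinNet tertiary coordinates are stored in picometers; multiplying by 0.01
# converts them to angstroms (1 angstrom = 100 pm).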
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes the atom mask implied by the amino-acid sequence alone."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a protein from a prediction."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
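# Minimal usage sketch (the function names are the module's own; the feature and
# result dicts here are illustrative stand-ins for real model outputs):
#   prot = from_prediction(
#       features={"aatype": aatype, "residue_index": residue_index},
#       result={"final_atom_positions": positions, "final_atom_mask": mask},
#   )
#   pdb_string = to_pdb(prot)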
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
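# The _LazyModule indirection defers importing torch/vision-backed submodules until
# an attribute is actually accessed, keeping the top-level package import cheap.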
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    "Simple model to do y = a * x + b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; with a total limit of 2, only the last two should remain
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Dict ='''/tmp/accelerate/state_checkpointing'''
SCREAMING_SNAKE_CASE_: int =DummyModel()
SCREAMING_SNAKE_CASE_: Optional[int] =torch.optim.Adam(params=model.parameters(), lr=1E-3)
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
SCREAMING_SNAKE_CASE_: str =dummy_dataloaders()
SCREAMING_SNAKE_CASE_: List[Any] =ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
SCREAMING_SNAKE_CASE_: int =Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
SCREAMING_SNAKE_CASE_: Optional[Any] =accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
SCREAMING_SNAKE_CASE_: int =accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE_: str =group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
SCREAMING_SNAKE_CASE_: List[Any] =model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE_: Tuple =group['''params'''][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE_: int =group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
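# Note: this __main__ block is exercised by test_map_location above, which relaunches
# the file under torchrun to check optimizer map_location behavior across processes.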
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def A__ ( A_ ) -> List[str]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def A__ ( A_ ) -> str:
# word like '180' or '身高' or '神'
for char in word:
_lowercase = ord(A_ )
if not _is_chinese_char(A_ ):
return 0
return 1
def A__ ( A_ ) -> int:
_lowercase = set()
for token in tokens:
_lowercase = len(A_ ) > 1 and is_chinese(A_ )
if chinese_word:
word_set.add(A_ )
_lowercase = list(A_ )
return word_list
def A__ ( A_ , A_ ) -> Optional[int]:
if not chinese_word_set:
return bert_tokens
_lowercase = max([len(A_ ) for w in chinese_word_set] )
_lowercase = bert_tokens
_lowercase , _lowercase = 0, len(A_ )
while start < end:
_lowercase = True
if is_chinese(bert_word[start] ):
_lowercase = min(end - start , A_ )
for i in range(A_ , 1 , -1 ):
_lowercase = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowercase = "##" + bert_word[j]
_lowercase = start + i
_lowercase = False
break
if single_word:
start += 1
return bert_word
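# Illustrative example: with bert_tokens ["我", "喜", "欢"] and chinese_word_set
# {"喜欢"}, add_sub_symbol returns ["我", "喜", "##欢"], marking the subword
# continuation of the whole word "喜欢".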
def A__ ( A_ , A_ , A_ ) -> Dict:
_lowercase = []
for i in range(0 , len(A_ ) , 100 ):
_lowercase = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_lowercase = [get_chinese_word(A_ ) for r in res]
ltp_res.extend(A_ )
assert len(A_ ) == len(A_ )
_lowercase = []
for i in range(0 , len(A_ ) , 100 ):
_lowercase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=A_ , truncation=A_ , max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(A_ ) == len(A_ )
_lowercase = []
for input_ids, chinese_word in zip(A_ , A_ ):
_lowercase = []
for id in input_ids:
_lowercase = bert_tokenizer._convert_id_to_token(A_ )
input_tokens.append(A_ )
_lowercase = add_sub_symbol(A_ , A_ )
_lowercase = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(A_ ):
if token[:2] == "##":
_lowercase = token[2:]
# save chinese tokens' pos
if len(A_ ) == 1 and _is_chinese_char(ord(A_ ) ):
ref_id.append(A_ )
ref_ids.append(A_ )
assert len(A_ ) == len(A_ )
return ref_ids
def A__ ( A_ ) -> str:
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , "r" , encoding="utf-8" ) as f:
_lowercase = f.readlines()
_lowercase = [line.strip() for line in data if len(A_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_lowercase = LTP(args.ltp ) # faster in GPU device
_lowercase = BertTokenizer.from_pretrained(args.bert )
_lowercase = prepare_ref(A_ , A_ , A_ )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
_lowercase = [json.dumps(A_ ) + "\n" for ref in ref_ids]
f.writelines(A_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def test_config(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
def __a ( self ) -> Optional[int]:
a : Dict = TFCvtModelTester(self )
a : List[Any] = TFCvtConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def __a ( self ) -> Optional[Any]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
def __a ( self ) -> Dict:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def __a ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def __a ( self ) -> Union[str, Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def __a ( self ) -> List[Any]:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def __a ( self ) -> Optional[int]:
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
def __a ( self ) -> int:
a : Optional[int] = tf.keras.mixed_precision.Policy("mixed_float16" )
tf.keras.mixed_precision.set_global_policy(lowerCAmelCase__ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
def __a ( self ) -> Any:
a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : List[str] = model_class(lowerCAmelCase__ )
a : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __a ( self ) -> int:
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
a : Union[str, Any] = model_class(lowerCAmelCase__ )
a : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
a : Dict = outputs.hidden_states
a : Any = len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Dict = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Optional[int] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __a ( self ) -> Any:
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __a ( self ) -> str:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Tuple = TFCvtModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( ) ->int:
'''simple docstring'''
a : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
"""simple docstring"""
def exchange_sort(numbers):
    """Sort a list in ascending order by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
_UpperCamelCase = input("""Enter numbers separated by a comma:\n""").strip()
_UpperCamelCase = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
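# Example session: entering "5,4,3,2,1" prints [1, 2, 3, 4, 5].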
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file with a non-empty `all_model_classes` attribute."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` attributes in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated with `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to model tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and easy to read by mapping classes to their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
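# Example usage (the test-file path is illustrative; run from the root of a
# transformers checkout so the `tests.models.*` modules are importable):
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   print(to_json(get_model_to_tester_mapping(test_file)))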
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
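# Illustrative input/output (values are made up): given the JSON
#   {"benchmarks/bench.json": {"load_time": {"new": 0.5, "old": 0.6, "diff": -0.1}}}
# the script emits a markdown table whose value row reads
#   | new / old (diff) | 0.500000 / 0.600000 (-0.100000) |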
'''simple docstring'''
__all__ = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,  # t5 forces 100 extra tokens
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
_lowerCAmelCase : Optional[int] = UMTaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
_lowerCAmelCase : int = model(
input_ids=_snake_case , decoder_input_ids=_snake_case , attention_mask=_snake_case , decoder_attention_mask=_snake_case , )
_lowerCAmelCase : Union[str, Any] = model(input_ids=_snake_case , decoder_input_ids=_snake_case )
_lowerCAmelCase : Optional[int] = result.last_hidden_state
_lowerCAmelCase : Tuple = result.past_key_values
_lowerCAmelCase : Optional[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_snake_case ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
_lowerCAmelCase : int = UMTaModel(config=_snake_case ).get_decoder().to(_snake_case ).eval()
# first forward pass
_lowerCAmelCase : Optional[Any] = model(_snake_case , use_cache=_snake_case )
_lowerCAmelCase : Union[str, Any] = model(_snake_case )
_lowerCAmelCase : Union[str, Any] = model(_snake_case , use_cache=_snake_case )
self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) )
self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) + 1 )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = outputs.to_tuple()
        # create a hypothetical next token and extend next_input_ids with it
_lowerCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append the new token to the previous input_ids
_lowerCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase : Optional[Any] = model(_snake_case )["last_hidden_state"]
_lowerCAmelCase : int = model(_snake_case , past_key_values=_snake_case )["last_hidden_state"]
# select random slice
_lowerCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase : int = output_from_no_past[:, -1, random_slice_idx].detach()
_lowerCAmelCase : List[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , ):
_lowerCAmelCase : Optional[int] = UMTaModel(config=_snake_case ).to(_snake_case ).half().eval()
_lowerCAmelCase : int = model(**_snake_case )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(_snake_case ).any().item() )
@require_torch
class __A ( snake_case__ ,snake_case__ ,snake_case__ ,unittest.TestCase ):
'''simple docstring'''
a_ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a_ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
a_ = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a_ = True
a_ = False
a_ = False
a_ = True
a_ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
a_ = [0.8, 0.9]
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Tuple = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase : str = UMTaModel(config_and_inputs[0] ).to(_snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_snake_case , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=_snake_case , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Union[str, Any] = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase : Tuple = config_and_inputs[0]
_lowerCAmelCase : List[str] = UMTaForConditionalGeneration(_snake_case ).eval()
model.to(_snake_case )
_lowerCAmelCase : Any = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=_snake_case ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=_snake_case ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=_snake_case ),
}
for attn_name, (name, mask) in zip(_snake_case , head_masking.items() ):
_lowerCAmelCase : Dict = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_lowerCAmelCase : Any = torch.ones(
config.num_decoder_layers , config.num_heads , device=_snake_case )
_lowerCAmelCase : str = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=_snake_case , return_dict_in_generate=_snake_case , **_snake_case , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_lowerCAmelCase : List[Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : int = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=_snake_case ).to(_snake_case )
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=_snake_case , legacy=_snake_case )
_lowerCAmelCase : Tuple = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
_lowerCAmelCase : Tuple = tokenizer(_snake_case , return_tensors="pt" , padding=_snake_case ).input_ids
# fmt: off
_lowerCAmelCase : List[Any] = torch.tensor(
[
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(_snake_case , _snake_case )
_lowerCAmelCase : Tuple = model.generate(input_ids.to(_snake_case ) )
_lowerCAmelCase : Dict = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
_lowerCAmelCase : List[str] = tokenizer.batch_decode(_snake_case )
self.assertEqual(_snake_case , _snake_case )
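# ---------------------------------------------------------------------------
# A standalone sketch of the cache-consistency property that the `use_cache`
# test above verifies: one decoding step fed `past_key_values` must reproduce
# the last position of a full forward pass. The tensors below merely stand in
# for real model outputs, so every name here is illustrative.
import torch

torch.manual_seed(0)
full_hidden = torch.randn(2, 8, 16)            # stands in for model(input_ids).last_hidden_state
step_hidden = full_hidden[:, -1:, :].clone()   # stands in for model(next_token, past_key_values=...)

# Compare a random slice of the final position, exactly as the test does.
idx = torch.randint(full_hidden.shape[-1], (1,)).item()
assert torch.allclose(full_hidden[:, -1, idx], step_hidden[:, 0, idx], atol=1e-3)
# ---------------------------------------------------------------------------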
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Tuple ,_a : Collection[float] | None = None ):
'''simple docstring'''
if components is None:
_a : List[Any] = []
_a : str = list(_a )
def __len__( self : Dict ):
'''simple docstring'''
return len(self.__components )
def __str__( self : Any ):
'''simple docstring'''
return "(" + ",".join(map(_a ,self.__components ) ) + ")"
def __add__( self : List[Any] ,_a : Vector ):
'''simple docstring'''
_a : Tuple = len(self )
if size == len(_a ):
_a : Tuple = [self.__components[i] + other.component(_a ) for i in range(_a )]
return Vector(_a )
else:
raise Exception('must have the same size' )
def __sub__( self : List[Any] ,_a : Vector ):
'''simple docstring'''
_a : Any = len(self )
if size == len(_a ):
_a : List[str] = [self.__components[i] - other.component(_a ) for i in range(_a )]
return Vector(_a )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : Union[str, Any] ,_a : float ):
'''simple docstring'''
...
@overload
def __mul__( self : Optional[Any] ,_a : Vector ):
'''simple docstring'''
...
def __mul__( self : List[str] ,_a : float | Vector ):
'''simple docstring'''
if isinstance(_a ,(float, int) ):
_a : List[str] = [c * other for c in self.__components]
return Vector(_a )
elif isinstance(_a ,_a ) and len(self ) == len(_a ):
_a : List[str] = len(self )
_a : List[str] = [self.__components[i] * other.component(_a ) for i in range(_a )]
return sum(_a )
else: # error case
raise Exception('invalid operand!' )
def __lowercase ( self : str ):
'''simple docstring'''
return Vector(self.__components )
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
if isinstance(_a ,_a ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def __lowercase ( self : int ,_a : int ,_a : float ):
'''simple docstring'''
assert -len(self.__components ) <= pos < len(self.__components )
_a : List[str] = value
def __lowercase ( self : Tuple ):
'''simple docstring'''
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
_a : List[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(_a ) )
def __lowercase ( self : Union[str, Any] ,_a : Vector ,_a : bool = False ):
'''simple docstring'''
_a : Optional[int] = self * other
_a : int = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
assert isinstance(__a , __a )
return Vector([0] * dimension )
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
assert isinstance(__a , __a ) and (isinstance(__a , __a ))
_a : List[Any] = [0] * dimension
_a : Dict = 1
return Vector(__a )
def UpperCAmelCase_ (__a : float , __a : Vector , __a : Vector ):
"""simple docstring"""
assert (
isinstance(__a , __a )
and isinstance(__a , __a )
and (isinstance(__a , (int, float) ))
)
return x * scalar + y
def UpperCAmelCase_ (__a : int , __a : int , __a : int ):
"""simple docstring"""
random.seed(__a )
_a : Dict = [random.randint(__a , __a ) for _ in range(__a )]
return Vector(__a )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] ,_a : list[list[float]] ,_a : int ,_a : int ):
'''simple docstring'''
_a : Any = matrix
_a : Dict = w
_a : Optional[Any] = h
def __str__( self : str ):
'''simple docstring'''
_a : Dict = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : str ,_a : Matrix ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
_a : Union[str, Any] = []
for i in range(self.__height ):
_a : Dict = [
self.__matrix[i][j] + other.component(_a ,_a )
for j in range(self.__width )
]
matrix.append(_a )
return Matrix(_a ,self.__width ,self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__( self : Dict ,_a : Matrix ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
_a : List[Any] = []
for i in range(self.__height ):
_a : str = [
self.__matrix[i][j] - other.component(_a ,_a )
for j in range(self.__width )
]
matrix.append(_a )
return Matrix(_a ,self.__width ,self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : Dict ,_a : float ):
'''simple docstring'''
...
@overload
def __mul__( self : Any ,_a : Vector ):
'''simple docstring'''
...
def __mul__( self : Tuple ,_a : float | Vector ):
'''simple docstring'''
if isinstance(_a ,_a ): # matrix-vector
if len(_a ) == self.__width:
_a : str = zero_vector(self.__height )
for i in range(self.__height ):
_a : Union[str, Any] = [
self.__matrix[i][j] * other.component(_a )
for j in range(self.__width )
]
ans.change_component(_a ,sum(_a ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(_a ,(int, float) ): # matrix-scalar
_a : Tuple = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(_a ,self.__width ,self.__height )
return None
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return self.__height
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return self.__width
def __lowercase ( self : List[str] ,_a : int ,_a : int ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def __lowercase ( self : List[str] ,_a : int ,_a : int ,_a : float ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
_a : List[Any] = value
else:
raise Exception('change_component: indices out of bounds' )
def __lowercase ( self : str ,_a : int ,_a : int ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception('Matrix is not square' )
_a : Tuple = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(_a ) ):
_a : str = minor[i][:y] + minor[i][y + 1 :]
return Matrix(_a ,self.__width - 1 ,self.__height - 1 ).determinant()
def __lowercase ( self : Union[str, Any] ,_a : int ,_a : int ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(_a ,_a )
else:
raise Exception('Indices out of bounds' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
_a : str = [
self.__matrix[0][y] * self.cofactor(0 ,_a ) for y in range(self.__width )
]
return sum(_a )
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_a : list[list[float]] = [[0] * n for _ in range(__a )]
return Matrix(__a , __a , __a )
def UpperCAmelCase_ (__a : int , __a : int , __a : int , __a : int ):
"""simple docstring"""
random.seed(__a )
_a : list[list[float]] = [
[random.randint(__a , __a ) for _ in range(__a )] for _ in range(__a )
]
return Matrix(__a , __a , __a )
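# ---------------------------------------------------------------------------
# Usage sketch for the linear-algebra module above. The method bodies refer to
# `Vector`, `Matrix`, `zero_vector`, `component` and `euclidean_length`, so
# this assumes those are the intended public names of the (obfuscated)
# declarations; the expected values are worked out by hand from the
# definitions.
v = Vector([1.0, 2.0, 2.0])
w = Vector([3.0, 0.0, 4.0])

assert len(v + w) == 3                   # componentwise sum -> (4.0, 2.0, 6.0)
assert v * w == 11.0                     # dot product: 3 + 0 + 8
assert v.euclidean_length() == 3.0       # sqrt(1 + 4 + 4)

m = Matrix([[1.0, 0.0], [0.0, 2.0]], 2, 2)
assert m.determinant() == 2.0            # 1 * 2 - 0 * 0
assert (m * 3.0).component(1, 1) == 6.0  # scalar multiplication
# ---------------------------------------------------------------------------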
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] ,_a : Union[str, Any] ,_a : Union[str, Any]=13 ,_a : Any=32 ,_a : Optional[Any]=2 ,_a : Any=3 ,_a : str=16 ,_a : Tuple=[1, 2, 1] ,_a : Tuple=[2, 2, 4] ,_a : Any=2 ,_a : Optional[int]=2.0 ,_a : List[Any]=True ,_a : str=0.0 ,_a : Tuple=0.0 ,_a : Optional[Any]=0.1 ,_a : Dict="gelu" ,_a : Union[str, Any]=False ,_a : Any=True ,_a : Any=0.02 ,_a : List[Any]=1E-5 ,_a : Any=True ,_a : List[str]=None ,_a : str=True ,_a : Optional[int]=10 ,_a : List[str]=8 ,):
'''simple docstring'''
_a : Dict = parent
_a : str = batch_size
_a : Optional[int] = image_size
_a : str = patch_size
_a : Optional[int] = num_channels
_a : List[Any] = embed_dim
_a : Optional[Any] = depths
_a : Optional[int] = num_heads
_a : str = window_size
_a : Any = mlp_ratio
_a : Optional[Any] = qkv_bias
_a : Optional[Any] = hidden_dropout_prob
_a : Union[str, Any] = attention_probs_dropout_prob
_a : Union[str, Any] = drop_path_rate
_a : Union[str, Any] = hidden_act
_a : Union[str, Any] = use_absolute_embeddings
_a : str = patch_norm
_a : Tuple = layer_norm_eps
_a : List[Any] = initializer_range
_a : Optional[int] = is_training
_a : str = scope
_a : List[str] = use_labels
_a : int = type_sequence_label_size
_a : List[str] = encoder_stride
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : List[Any] = None
if self.use_labels:
_a : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : str ):
'''simple docstring'''
return SwinvaConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def __lowercase ( self : str ,_a : Tuple ,_a : Tuple ,_a : Any ):
'''simple docstring'''
_a : List[Any] = SwinvaModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[Any] = model(_a )
_a : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_a : Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def __lowercase ( self : Union[str, Any] ,_a : List[str] ,_a : Tuple ,_a : List[Any] ):
'''simple docstring'''
_a : int = SwinvaForMaskedImageModeling(config=_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_a : List[Any] = 1
_a : str = SwinvaForMaskedImageModeling(_a )
model.to(_a )
model.eval()
_a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[Any] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def __lowercase ( self : Any ,_a : List[Any] ,_a : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
_a : Optional[Any] = self.type_sequence_label_size
_a : str = SwinvaForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Any = self.prepare_config_and_inputs()
_a, _a, _a : int = config_and_inputs
_a : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__UpperCAmelCase : Any = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Any = False
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = SwinvaModelTester(self )
_a : Any = ConfigTester(self ,config_class=_a ,embed_dim=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def __lowercase ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a, _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = model_class(_a )
_a : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : int = True
for model_class in self.all_model_classes:
_a : Optional[Any] = True
_a : List[str] = False
_a : Tuple = True
_a : Dict = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Tuple = model(**self._prepare_for_class(_a ,_a ) )
_a : Any = outputs.attentions
_a : Optional[int] = len(self.model_tester.depths )
self.assertEqual(len(_a ) ,_a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a : Optional[int] = True
_a : Dict = config.window_size**2
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_a ,_a ) )
_a : Union[str, Any] = outputs.attentions
self.assertEqual(len(_a ) ,_a )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
_a : str = len(_a )
# Check attention is always last and order is fine
_a : int = True
_a : int = True
_a : Union[str, Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Optional[int] = model(**self._prepare_for_class(_a ,_a ) )
if hasattr(self.model_tester ,'num_hidden_states_types' ):
_a : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_a : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states ,len(_a ) )
_a : str = outputs.attentions
self.assertEqual(len(_a ) ,_a )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def __lowercase ( self : Optional[int] ,_a : str ,_a : Union[str, Any] ,_a : Any ,_a : Union[str, Any] ):
'''simple docstring'''
_a : int = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Optional[int] = model(**self._prepare_for_class(_a ,_a ) )
_a : Any = outputs.hidden_states
_a : str = getattr(
self.model_tester ,'expected_num_hidden_layers' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_a ) ,_a )
# Swinv2 has a different seq_length
_a : List[str] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_a : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
_a : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(_a ) ,_a )
_a, _a, _a, _a : Optional[int] = reshaped_hidden_states[0].shape
_a : str = (
reshaped_hidden_states[0].view(_a ,_a ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def __lowercase ( self : Dict ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_a : List[Any] = True
self.check_hidden_states_output(_a ,_a ,_a ,_a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Any = True
self.check_hidden_states_output(_a ,_a ,_a ,_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a, _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_a : List[Any] = 3
_a : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_a : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_a : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_a : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_a : int = True
self.check_hidden_states_output(_a ,_a ,_a ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Optional[Any] = True
self.check_hidden_states_output(_a ,_a ,_a ,(padded_height, padded_width) )
def __lowercase ( self : str ):
'''simple docstring'''
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_a )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : str = SwinvaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : int = _config_zero_init(_a )
for model_class in self.all_model_classes:
_a : int = model_class(config=_a )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Tuple ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[Any] = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
_a )
_a : Union[str, Any] = self.default_image_processor
_a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_a : Dict = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : Dict = model(**_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
_a : int = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
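# ---------------------------------------------------------------------------
# Arithmetic behind the shape check in `create_and_check_model` above: each of
# the `len(depths) - 1` patch-merging stages of Swin v2 reduces the token
# count by 4x and doubles the channel dimension. With the tester defaults
# (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]):
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]

num_patches = (image_size // patch_size) ** 2               # 16 * 16 = 256 tokens
expected_seq_len = num_patches // (4 ** (len(depths) - 1))  # 256 // 16 = 16
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))      # 16 * 4 = 64

assert (expected_seq_len, expected_dim) == (16, 64)
# ---------------------------------------------------------------------------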
import numpy
class lowerCamelCase :
def __init__( self : Optional[int] , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ) -> None:
_a : List[str] = input_array
        # Random initial weights are assigned: the first argument is the number
        # of nodes in the previous layer and the second argument is the number
        # of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer.
        # The first hidden layer consists of 4 nodes.
_a : Union[str, Any] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
_a : Any = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
_a : Optional[int] = numpy.random.rand(3 , 1 )
# Real output values provided.
_a : Any = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
_a : List[Any] = numpy.zeros(output_array.shape )
def snake_case_ ( self : List[str] ) -> numpy.ndarray:
_a : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
_a : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
_a : List[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case_ ( self : Union[str, Any] ) -> None:
_a : Optional[Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
_a : Optional[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
_a : str = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case_ ( self : List[str] , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ) -> None:
for iteration in range(1 , iterations + 1 ):
_a : Dict = self.feedforward()
self.back_propagation()
if give_loss:
_a : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def snake_case_ ( self : Optional[int] , __snake_case : numpy.ndarray ) -> int:
_a : Union[str, Any] = input_arr
_a : Tuple = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
_a : Tuple = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
_a : str = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCamelCase_ ( UpperCamelCase_ ):
return 1 / (1 + numpy.exp(-value ))
def lowerCamelCase_ ( UpperCamelCase_ ):
return (value) * (1 - (value))
def lowerCamelCase_ ( ):
_a : Any = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
_a : Optional[Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
_a : List[str] = TwoHiddenLayerNeuralNetwork(
input_array=UpperCamelCase_ , output_array=UpperCamelCase_ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=UpperCamelCase_ , iterations=10 , give_loss=UpperCamelCase_ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
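# ---------------------------------------------------------------------------
# Note on the helpers above: `sigmoid_derivative` expects the already-activated
# value s = sigmoid(x), since d sigmoid(x)/dx = s * (1 - s). A quick numerical
# check of that identity with a central difference (a standalone sketch, not
# part of the module):
import numpy

def _sigmoid(value):
    return 1 / (1 + numpy.exp(-value))

x, eps = 0.3, 1e-6
s = _sigmoid(x)
analytic = s * (1 - s)                                         # s(1 - s) identity
numerical = (_sigmoid(x + eps) - _sigmoid(x - eps)) / (2 * eps)  # central difference
assert abs(analytic - numerical) < 1e-8
# ---------------------------------------------------------------------------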
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : int = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCamelCase ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Tuple = 'data2vec-audio'
def __init__( self : int , __snake_case : Optional[int]=32 , __snake_case : Tuple=768 , __snake_case : List[str]=12 , __snake_case : Any=12 , __snake_case : Optional[int]=3072 , __snake_case : List[str]="gelu" , __snake_case : str=0.1 , __snake_case : Any=0.1 , __snake_case : int=0.1 , __snake_case : Optional[Any]=0.0 , __snake_case : str=0.1 , __snake_case : int=0.1 , __snake_case : List[str]=0.02 , __snake_case : int=1E-5 , __snake_case : List[str]="gelu" , __snake_case : Any=(512, 512, 512, 512, 512, 512, 512) , __snake_case : Tuple=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Dict=(10, 3, 3, 3, 3, 2, 2) , __snake_case : Any=False , __snake_case : Tuple=16 , __snake_case : List[Any]=19 , __snake_case : Optional[int]=5 , __snake_case : List[Any]=0.05 , __snake_case : Union[str, Any]=10 , __snake_case : List[str]=2 , __snake_case : Tuple=0.0 , __snake_case : int=10 , __snake_case : Union[str, Any]=0 , __snake_case : int="sum" , __snake_case : Dict=False , __snake_case : Dict=False , __snake_case : Optional[int]=256 , __snake_case : Optional[int]=(512, 512, 512, 512, 1500) , __snake_case : Union[str, Any]=(5, 3, 3, 1, 1) , __snake_case : int=(1, 2, 3, 1, 1) , __snake_case : Optional[int]=512 , __snake_case : str=0 , __snake_case : Optional[int]=1 , __snake_case : Optional[int]=2 , __snake_case : Optional[int]=False , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=2 , __snake_case : List[Any]=3 , __snake_case : List[str]=None , **__snake_case : List[str] , ) -> Tuple:
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
_a : List[Any] = hidden_size
_a : List[str] = feat_extract_activation
_a : Optional[int] = list(__snake_case )
_a : List[Any] = list(__snake_case )
_a : Dict = list(__snake_case )
_a : List[Any] = conv_bias
_a : List[Any] = num_conv_pos_embeddings
_a : Optional[Any] = num_conv_pos_embedding_groups
_a : Dict = conv_pos_kernel_size
_a : List[Any] = len(self.conv_dim )
_a : Any = num_hidden_layers
_a : Any = intermediate_size
_a : List[Any] = hidden_act
_a : List[Any] = num_attention_heads
_a : Union[str, Any] = hidden_dropout
_a : Dict = attention_dropout
_a : List[Any] = activation_dropout
_a : str = feat_proj_dropout
_a : Optional[int] = final_dropout
_a : str = layerdrop
_a : Optional[int] = layer_norm_eps
_a : Any = initializer_range
_a : Optional[int] = vocab_size
_a : Union[str, Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a : List[Any] = mask_time_prob
_a : Optional[int] = mask_time_length
_a : Any = mask_time_min_masks
_a : Optional[Any] = mask_feature_prob
_a : List[Any] = mask_feature_length
_a : Optional[Any] = mask_feature_min_masks
# ctc loss
_a : Optional[Any] = ctc_loss_reduction
_a : Tuple = ctc_zero_infinity
# adapter
_a : Tuple = add_adapter
_a : Optional[int] = adapter_kernel_size
_a : Any = adapter_stride
_a : List[str] = num_adapter_layers
_a : Any = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a : Optional[int] = list(__snake_case )
_a : Union[str, Any] = list(__snake_case )
_a : Tuple = list(__snake_case )
_a : str = xvector_output_dim
@property
def snake_case_ ( self : Optional[int] ) -> Dict:
return math.prod(self.conv_stride )
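# ---------------------------------------------------------------------------
# The property above returns the product of the convolutional strides, i.e.
# the total downsampling factor between raw waveform samples and encoder
# frames. With the default strides this is 5 * 2**6 = 320 samples per frame,
# so 16 kHz audio yields 16000 / 320 = 50 frames per second (a quick
# standalone check):
import math

conv_stride = (5, 2, 2, 2, 2, 2, 2)   # default strides from the config above
assert math.prod(conv_stride) == 320
assert 16_000 // math.prod(conv_stride) == 50
# ---------------------------------------------------------------------------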
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case : str = logging.get_logger(__name__)
snake_case : Union[str, Any] = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def snake_case__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
A__ : Dict = TOKENIZER_CLASSES
else:
A__ : Tuple = {tokenizer_name: getattr(__lowercase , tokenizer_name + "Fast" )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
A__ : Optional[int] = TOKENIZER_CLASSES[tokenizer_name]
A__ : str = True
if checkpoint_name is None:
A__ : str = list(tokenizer_class.max_model_input_sizes.keys() )
else:
A__ : Union[str, Any] = [checkpoint_name]
        logger.info(F'For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
            logger.info(F'Loading {tokenizer_class.__name__} {checkpoint}' )
# Load tokenizer
A__ : Optional[int] = tokenizer_class.from_pretrained(__lowercase , force_download=__lowercase )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
A__ , A__ : Tuple = checkpoint.split("/" )
A__ : int = os.path.join(__lowercase , __lowercase )
elif add_prefix:
A__ : List[Any] = checkpoint
A__ : Optional[Any] = dump_path
else:
A__ : str = None
A__ : int = dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
A__ : Optional[int] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
A__ : Optional[int] = file_path.split(__lowercase )[-1][0]
if next_char == "/":
A__ : Any = os.path.join(__lowercase , __lowercase )
A__ : Dict = None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
A__ : List[str] = tokenizer.save_pretrained(
__lowercase , legacy_format=__lowercase , filename_prefix=__lowercase )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(__lowercase )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
snake_case : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
snake_case : Union[str, Any] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
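# ---------------------------------------------------------------------------
# Hypothetical direct call of the converter defined above, with the argument
# order taken from the __main__ block; the tokenizer and checkpoint names are
# illustrative (tokenizer_name must be a key of TOKENIZER_CLASSES). Kept
# commented out because running it downloads a checkpoint:
#
# convert_slow_checkpoint_to_fast(
#     "BertTokenizer",       # tokenizer_name
#     "bert-base-uncased",   # checkpoint_name
#     "./fast_tokenizers",   # dump_path
#     False,                 # force_download
# )
# ---------------------------------------------------------------------------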
def snake_case__ ( ) -> Union[str, Any]:
"""simple docstring"""
A__ : Tuple = []
A__ : Optional[int] = 1
while len(__lowercase ) < 1E6:
constant.append(str(__lowercase ) )
i += 1
A__ : Any = "".join(__lowercase )
    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[99] )
        * int(constant[999] )
        * int(constant[9999] )
        * int(constant[99999] )
        * int(constant[999999] )
    )
if __name__ == "__main__":
print(solution())
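# ---------------------------------------------------------------------------
# The function above builds the digits of Champernowne's constant
# 0.123456789101112... and multiplies the digits at positions 1, 10, 100, ...,
# 1_000_000 (Project Euler 40). Note that its `len(...) < 1E6` condition counts
# appended *numbers*, not digits, so it over-allocates; a sketch that counts
# digits directly (function and variable names here are mine):
def _champernowne_digit_product() -> int:
    digits = []
    i = 1
    while len(digits) < 1_000_000:   # stop once a million digits are available
        digits.extend(str(i))        # extend adds one list entry per digit
        i += 1
    product = 1
    for position in (1, 10, 100, 1_000, 10_000, 100_000, 1_000_000):
        product *= int(digits[position - 1])  # positions are 1-based
    return product
# ---------------------------------------------------------------------------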
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
UpperCAmelCase_ = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ = "PoolFormerConfig"
# Base docstring
UpperCAmelCase_ = "sail/poolformer_s12"
UpperCAmelCase_ = [1, 5_12, 7, 7]
# Image classification docstring
UpperCAmelCase_ = "sail/poolformer_s12"
UpperCAmelCase_ = "tabby, tabby cat"
UpperCAmelCase_ = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : bool = False ) -> str:
"""simple docstring"""
if drop_prob == 0.0 or not training:
return input
_UpperCAmelCase = 1 - drop_prob
_UpperCAmelCase = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
_UpperCAmelCase = keep_prob + torch.rand(SCREAMING_SNAKE_CASE_ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
_UpperCAmelCase = input.div(SCREAMING_SNAKE_CASE_ ) * random_tensor
return output
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase = None ):
super().__init__()
_UpperCAmelCase = drop_prob
def UpperCamelCase( self , _UpperCamelCase ):
return drop_path(_UpperCamelCase , self.drop_prob , self.training )
def UpperCamelCase( self ):
return "p={}".format(self.drop_prob )
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
super().__init__()
_UpperCAmelCase = patch_size if isinstance(_UpperCamelCase , collections.abc.Iterable ) else (patch_size, patch_size)
_UpperCAmelCase = stride if isinstance(_UpperCamelCase , collections.abc.Iterable ) else (stride, stride)
_UpperCAmelCase = padding if isinstance(_UpperCamelCase , collections.abc.Iterable ) else (padding, padding)
_UpperCAmelCase = nn.Convad(_UpperCamelCase , _UpperCamelCase , kernel_size=_UpperCamelCase , stride=_UpperCamelCase , padding=_UpperCamelCase )
_UpperCAmelCase = norm_layer(_UpperCamelCase ) if norm_layer else nn.Identity()
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = self.projection(_UpperCamelCase )
_UpperCAmelCase = self.norm(_UpperCamelCase )
return embeddings
class __UpperCamelCase ( nn.GroupNorm ):
def __init__( self , _UpperCamelCase , **_UpperCamelCase ):
super().__init__(1 , _UpperCamelCase , **_UpperCamelCase )
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase ):
super().__init__()
_UpperCAmelCase = nn.AvgPoolad(_UpperCamelCase , stride=1 , padding=pool_size // 2 , count_include_pad=_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase ):
return self.pool(_UpperCamelCase ) - hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
super().__init__()
_UpperCAmelCase = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 )
_UpperCAmelCase = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 )
_UpperCAmelCase = PoolFormerDropPath(_UpperCamelCase )
if isinstance(config.hidden_act , _UpperCamelCase ):
_UpperCAmelCase = ACTaFN[config.hidden_act]
else:
_UpperCAmelCase = config.hidden_act
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = self.conva(_UpperCamelCase )
_UpperCAmelCase = self.act_fn(_UpperCamelCase )
_UpperCAmelCase = self.drop(_UpperCamelCase )
_UpperCAmelCase = self.conva(_UpperCamelCase )
_UpperCAmelCase = self.drop(_UpperCamelCase )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
super().__init__()
_UpperCAmelCase = PoolFormerPooling(_UpperCamelCase )
_UpperCAmelCase = PoolFormerOutput(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = PoolFormerGroupNorm(_UpperCamelCase )
_UpperCAmelCase = PoolFormerGroupNorm(_UpperCamelCase )
# Useful for training neural nets
_UpperCAmelCase = PoolFormerDropPath(_UpperCamelCase ) if drop_path > 0.0 else nn.Identity()
_UpperCAmelCase = config.use_layer_scale
if config.use_layer_scale:
_UpperCAmelCase = nn.Parameter(
config.layer_scale_init_value * torch.ones((_UpperCamelCase) ) , requires_grad=_UpperCamelCase )
_UpperCAmelCase = nn.Parameter(
config.layer_scale_init_value * torch.ones((_UpperCamelCase) ) , requires_grad=_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase ):
if self.use_layer_scale:
_UpperCAmelCase = self.pooling(self.before_norm(_UpperCamelCase ) )
_UpperCAmelCase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_UpperCAmelCase = hidden_states + self.drop_path(_UpperCamelCase )
_UpperCAmelCase = ()
_UpperCAmelCase = self.output(self.after_norm(_UpperCamelCase ) )
_UpperCAmelCase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_UpperCAmelCase = hidden_states + self.drop_path(_UpperCamelCase )
_UpperCAmelCase = (output,) + outputs
return outputs
else:
_UpperCAmelCase = self.drop_path(self.pooling(self.before_norm(_UpperCamelCase ) ) )
# First residual connection
_UpperCAmelCase = pooling_output + hidden_states
_UpperCAmelCase = ()
# Second residual connection inside the PoolFormerOutput block
_UpperCAmelCase = self.drop_path(self.output(self.after_norm(_UpperCamelCase ) ) )
_UpperCAmelCase = hidden_states + layer_output
_UpperCAmelCase = (output,) + outputs
return outputs
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase ):
super().__init__()
_UpperCAmelCase = config
# stochastic depth decay rule
_UpperCAmelCase = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_UpperCAmelCase = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_UpperCAmelCase = nn.ModuleList(_UpperCamelCase )
# Transformer blocks
_UpperCAmelCase = []
_UpperCAmelCase = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_UpperCAmelCase = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_UpperCamelCase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(_UpperCamelCase ) )
_UpperCAmelCase = nn.ModuleList(_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=True ):
_UpperCAmelCase = () if output_hidden_states else None
_UpperCAmelCase = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_UpperCAmelCase , _UpperCAmelCase = layers
# Get patch embeddings from hidden_states
_UpperCAmelCase = embedding_layer(_UpperCamelCase )
# Send the embeddings through the blocks
for _, blk in enumerate(_UpperCamelCase ):
_UpperCAmelCase = blk(_UpperCamelCase )
_UpperCAmelCase = layer_outputs[0]
if output_hidden_states:
_UpperCAmelCase = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCamelCase , hidden_states=_UpperCamelCase )
class __UpperCamelCase ( A__ ):
__A : Tuple = PoolFormerConfig
__A : int = """poolformer"""
__A : Tuple = """pixel_values"""
__A : Optional[Any] = True
def UpperCamelCase( self , _UpperCamelCase ):
if isinstance(_UpperCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_UpperCamelCase , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=False ):
if isinstance(_UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = value
UpperCAmelCase_ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
"""The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , A__ , )
class __UpperCamelCase ( A__ ):
def __init__( self , _UpperCamelCase ):
super().__init__(_UpperCamelCase )
_UpperCAmelCase = config
_UpperCAmelCase = PoolFormerEncoder(_UpperCamelCase )
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase( self ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase( self , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , ):
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
_UpperCAmelCase = self.encoder(
_UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase , )
_UpperCAmelCase = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_UpperCamelCase , hidden_states=encoder_outputs.hidden_states , )
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase ):
super().__init__()
_UpperCAmelCase = nn.Linear(config.hidden_size , config.hidden_size )
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = self.dense(_UpperCamelCase )
return output
@add_start_docstrings(
"""
PoolFormer Model transformer with an image classification head on top
""" , A__ , )
class __UpperCamelCase ( A__ ):
def __init__( self , _UpperCamelCase ):
super().__init__(_UpperCamelCase )
_UpperCAmelCase = config.num_labels
_UpperCAmelCase = PoolFormerModel(_UpperCamelCase )
# Final norm
_UpperCAmelCase = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_UpperCAmelCase = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase( self , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , ):
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.poolformer(
_UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase , )
_UpperCAmelCase = outputs[0]
_UpperCAmelCase = self.classifier(self.norm(_UpperCamelCase ).mean([-2, -1] ) )
_UpperCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCAmelCase = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCAmelCase = '''single_label_classification'''
else:
_UpperCAmelCase = '''multi_label_classification'''
if self.config.problem_type == "regression":
_UpperCAmelCase = MSELoss()
if self.num_labels == 1:
_UpperCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_UpperCAmelCase = loss_fct(_UpperCamelCase , _UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
_UpperCAmelCase = CrossEntropyLoss()
_UpperCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_UpperCAmelCase = BCEWithLogitsLoss()
_UpperCAmelCase = loss_fct(_UpperCamelCase , _UpperCamelCase )
if not return_dict:
_UpperCAmelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_UpperCamelCase , logits=_UpperCamelCase , hidden_states=outputs.hidden_states )
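

# Added usage sketch (not part of the original module). "sail/poolformer_s12"
# is the reference PoolFormer checkpoint; the surrounding calls are illustrative
# rather than canonical API docs, and `image` is assumed to be a PIL image.
#
#     from transformers import AutoImageProcessor, PoolFormerForImageClassification
#     import torch
#
#     processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#     model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     predicted_label = logits.argmax(-1).item()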
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Encode a message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Decode a Morse-code message back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
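

def roundtrip_demo() -> None:
    """Added sketch (not in the original script): encrypting then decrypting
    should return the upper-cased input, since encrypt() upper-cases first."""
    sample = "SOS 911"
    assert decrypt(encrypt(sample)) == sample.upper()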
if __name__ == "__main__":
main()
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Count the values of n below `limit` that admit exactly ten decreasing
    arithmetic progressions x, y, z of positive integers with x**2 - y**2 - z**2 == n."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
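

def brute_force_count(n: int, bound: int = 2_000) -> int:
    """Added cross-check sketch (not in the original solution): directly count
    progressions x = a + d, y = a, z = a - d with x**2 - y**2 - z**2 == n,
    which simplifies algebraically to a * (4 * d - a) == n. `bound` must be
    large enough to cover every solution for the chosen n."""
    total = 0
    for a in range(1, bound):
        for d in range(1, a):  # z = a - d must stay positive
            if a * (4 * d - a) == n:
                total += 1
    return total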
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
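

# Added usage sketch (not part of this __init__). Most schedulers exported here
# are interchangeable on a diffusers pipeline via `from_config`; the model id
# below is an assumption chosen for illustration.
#
#     from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
#
#     pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)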
"""simple docstring"""
UpperCAmelCase = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
"""simple docstring"""
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with the Maclaurin series truncated at `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # reduce theta into [0, 2*pi) so the truncated series stays accurate
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with the Maclaurin series truncated at `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
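
    # Added cross-check (sketch, not in the original file): after range
    # reduction the truncated series should agree closely with the math module.
    import math

    for x in (0.0, 1.0, -2.5, 10.0):
        assert abs(maclaurin_sin(x) - math.sin(x)) < 1e-9
        assert abs(maclaurin_cos(x) - math.cos(x)) < 1e-9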
'''simple docstring'''
import string
def atbash_slow(sequence: str) -> str:
    """Encode/decode with the Atbash cipher one character at a time."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:  # uppercase A-Z
            output += chr(155 - extract)
        elif 97 <= extract <= 122:  # lowercase a-z
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """Encode/decode with the Atbash cipher using a reversed-alphabet lookup."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Benchmark the two implementations against each other."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
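
    # Added property check (sketch, not in the original file): Atbash is an
    # involution, so applying it twice returns the original text.
    for example in ("ABCDEFGH", "123GGjj", "with space"):
        assert atbash(atbash(example)) == example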
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
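

# Added usage sketch (not part of this module). In practice this formatter is
# selected through the public `datasets` API rather than instantiated directly:
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#     ds[0]["x"]  # -> jax.Array placed on the formatter's default device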
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCAmelCase : List[str] = get_tests_dir('''fixtures/dummy-config.json''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = 0
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = AutoConfig.for_model("roberta" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,"fake-roberta" )
os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase ,"config.json" ) ,"w" ) as f:
f.write(json.dumps({} ) )
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertEqual(type(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
try:
AutoConfig.register("custom" ,__lowerCAmelCase )
# Wrong model type will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("model" ,__lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("bert" ,__lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCamelCase : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _lowercase ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"bert-base is not a local folder and is not a valid model identifier" ):
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained("bert-base" )
def _lowercase ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ,revision="aaaaaa" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." ,):
_lowerCamelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        # If remote code is not trusted, loading this dynamic config should fail.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")
def _lowercase ( self: Dict ):
'''simple docstring'''
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _a ( unittest.TestCase ):
"""simple docstring"""
A_ = MODEL_FOR_MASKED_LM_MAPPING
A_ = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
@require_tf
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 38015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 25506,
'token_str': ' accuser',
},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
] , )
UpperCamelCase_ = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Optional[Any]:
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()

        outputs = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(outputs, list)
@slow
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
def _UpperCAmelCase ( self ) -> Union[str, Any]:
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_5_1,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_1_4,
'token': 12790,
'token_str': ' Lyon',
},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
UpperCamelCase_ = None
UpperCamelCase_ = None
self.run_pipeline_test(_UpperCAmelCase , [] )
@require_tf
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
UpperCamelCase_ = None
UpperCamelCase_ = None
self.run_pipeline_test(_UpperCAmelCase , [] )
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = fill_masker.tokenizer
UpperCamelCase_ = fill_masker.model
UpperCamelCase_ = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
_UpperCAmelCase , [
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
] , )
with self.assertRaises(_UpperCAmelCase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_UpperCAmelCase ):
fill_masker('This is' )
self.run_test_top_k(_UpperCAmelCase , _UpperCAmelCase )
self.run_test_targets(_UpperCAmelCase , _UpperCAmelCase )
self.run_test_top_k_targets(_UpperCAmelCase , _UpperCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(_UpperCAmelCase , _UpperCAmelCase )
self.fill_mask_with_multiple_masks(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
UpperCamelCase_ = tokenizer.get_vocab()
UpperCamelCase_ = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , targets=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase )
UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) )
# Call argument
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase )
UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) )
# Score equivalence
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
UpperCamelCase_ = [top_mask['token_str'] for top_mask in outputs]
UpperCamelCase_ = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_UpperCAmelCase ) == set(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
UpperCamelCase_ = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
# Raises with invalid
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] )
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , top_k=2 )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = tokenizer.get_vocab()
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# top_k=2, ntargets=3
UpperCamelCase_ = sorted(vocab.keys() )[:3]
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=_UpperCAmelCase )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        UpperCamelCase_ = [el['token_str'] for el in sorted(_UpperCAmelCase , key=lambda x: x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_UpperCAmelCase ).issubset(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_UpperCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCamelCase_ = sorted(vocab.keys() )[:3]
UpperCamelCase_ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCamelCase_ = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=_UpperCAmelCase , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_UpperCAmelCase ) , 3 )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_UpperCAmelCase , [
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
] , )
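

# Added usage sketch (not part of the test module; mirrors the pipelines
# exercised above):
#
#     from transformers import pipeline
#
#     unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
#     unmasker("The largest city in France is <mask>.")
#     # -> [{"sequence": ..., "score": ..., "token": ..., "token_str": ...}, ...]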
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True, )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Dict:
"""simple docstring"""
A : Optional[int] = True
A : str = OpenLlamaModel(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : List[str] = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , )
A : Tuple = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , )
A : Tuple = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
A : List[Any] = OpenLlamaForCausalLM(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Tuple = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Dict:
"""simple docstring"""
A : str = True
A : int = True
A : Dict = OpenLlamaForCausalLM(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
# first forward pass
A : Optional[Any] = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , )
A : Union[str, Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
A : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
A : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
A : List[Any] = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
A : Optional[Any] = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
# select random slice
A : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A : int = output_from_no_past[:, -3:, random_slice_idx].detach()
A : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
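

# Added configuration sketch (illustrative only; it mirrors the rope_scaling
# dict the parameterized test above assigns on the config):
#
#     config = OpenLlamaConfig(vocab_size=99, hidden_size=32)
#     config.rope_scaling = {"type": "linear", "factor": 2.0}
#     model = OpenLlamaModel(config)  # positions beyond the trained length are rescaled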
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
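

# Added usage sketch (not part of the pipeline module; the model name is an
# assumption chosen for illustration):
#
#     from transformers import pipeline
#
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     features = extractor("Hello world")
#     # -> nested list of shape [1, num_tokens, hidden_size]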
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _UpperCAmelCase:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_config()
_UpperCamelCase = self.prepare_autoformer_inputs_dict(__a)
return config, inputs_dict
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase ( self , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = AutoformerModel(config=__a).to(__a).eval()
_UpperCamelCase = model(**__a)
_UpperCamelCase = outputs.encoder_last_hidden_state
_UpperCamelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = model.get_encoder()
encoder.save_pretrained(__a)
_UpperCamelCase = AutoformerEncoder.from_pretrained(__a).to(__a)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = model.create_network_inputs(**__a)
_UpperCamelCase , _UpperCamelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
_UpperCamelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_UpperCamelCase = encoder(inputs_embeds=__a)[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3)
_UpperCamelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1)
.unsqueeze(1)
.repeat(1 , config.prediction_length , 1)
)
_UpperCamelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_UpperCamelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_UpperCamelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = model.get_decoder()
decoder.save_pretrained(__a)
_UpperCamelCase = AutoformerDecoder.from_pretrained(__a).to(__a)
_UpperCamelCase = decoder(
trend=__a , inputs_embeds=__a , encoder_hidden_states=__a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state

        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
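# A minimal sketch (not part of the test suite) of the series-decomposition
# idea behind Autoformer's `decomposition_layer`, which the standalone
# encoder/decoder check above relies on: a moving average extracts the trend
# and the residual is treated as the seasonal component. The kernel size and
# the edge-padding scheme here are illustrative assumptions.
def _decompose_sketch(x, kernel_size=25):
    # x: (batch, time, features)
    pad_front = (kernel_size - 1) // 2
    pad_end = kernel_size - 1 - pad_front
    front = x[:, :1, :].repeat(1, pad_front, 1)
    end = x[:, -1:, :].repeat(1, pad_end, 1)
    padded = torch.cat([front, x, end], dim=1)
    trend = padded.unfold(1, kernel_size, 1).mean(dim=-1)  # sliding-window mean
    seasonal = x - trend
    return seasonal, trend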
| 19
|
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        """k is an empirically determined constant in [0.04, 0.06];
        window_size is the size of the neighbourhood considered."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # The response threshold can be tuned
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
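    # A small self-contained sketch (pure NumPy) of the Harris response
    # R = det(M) - k * trace(M)**2 for a single window, to illustrate the
    # quantity the nested loops above accumulate. The synthetic image below
    # is an assumption for the example.
    demo = np.zeros((8, 8))
    demo[4:, 4:] = 1.0  # a synthetic corner at (4, 4)
    demo_dy, demo_dx = np.gradient(demo)
    demo_ixx, demo_iyy, demo_ixy = demo_dx**2, demo_dy**2, demo_dx * demo_dy
    yy = xx = 4
    off = 1
    sxx = demo_ixx[yy - off : yy + off + 1, xx - off : xx + off + 1].sum()
    syy = demo_iyy[yy - off : yy + off + 1, xx - off : xx + off + 1].sum()
    sxy = demo_ixy[yy - off : yy + off + 1, xx - off : xx + off + 1].sum()
    response = (sxx * syy - sxy**2) - 0.04 * (sxx + syy) ** 2
    print(response)  # a large positive response marks a corner-like window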
| 393
| 0
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_snake_case : int = open # noqa: we just need to have a builtin inside this module to test it properly
| 711
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
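# A small helper sketch generalizing the seeding pattern used in
# `get_dummy_inputs` above: torch.Generator(device=...) is not supported on
# "mps", so tests fall back to the global CPU seed there. The function name
# is illustrative, not part of diffusers.
def make_generator(device, seed):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)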
| 493
| 0
|
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = """https://openaipublic.azureedge.net/jukebox/models/"""
_a = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    """Map a single original Jukebox weight name to the transformers naming scheme."""
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
_UpperCamelCase = key.replace('''.model.1.bias''', '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
_UpperCamelCase = key.replace('''.model.1.weight''', '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
_UpperCamelCase = key.replace('''.model.3.bias''', '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
_UpperCamelCase = key.replace('''.model.3.weight''', '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
_UpperCamelCase = key.replace('''conditioner_blocks.0''', '''conditioner_blocks''' )
if "prime_prior" in key:
_UpperCamelCase = key.replace('''prime_prior''', '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_UpperCamelCase = key.replace('''.emb.''', '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''', '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''', '''metadata_embedding.''' )
if "x_emb.emb." in key:
_UpperCamelCase = key.replace('''0.x_emb.emb''', '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''', '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''', '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''', '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''', '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''', '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''', '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''', '''embed_tokens''' )
return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_a = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
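# A minimal, self-contained illustration of the regex-driven renaming that
# fix_jukebox_keys performs (the key below is made up for the example):
#
#   import re
#   pattern = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
#   key = "encoders.0.level_blocks.1.model.2.1.weight"
#   groups = pattern.match(key).groups()
#   block_index = int(groups[2]) * 2 + int(groups[3])  # -> 5
#   new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
#   # -> "encoders.0.level_blocks.1.downsample_block.5.weight"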
| 19
|
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
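# Hypothetical usage sketch (the checkpoint name is an assumption for
# illustration, not dictated by this file):
#   from PIL import Image
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
#   print(inputs.keys())  # pixel_values plus the tokenizer outputs merged in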
| 677
| 0
|
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num!; results are memoized by the lru_cache decorator above."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
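    # Illustrative check of the memoization: after one call, smaller inputs
    # are answered from the cache instead of recursing again.
    factorial(10)
    print(factorial.cache_info())  # hits/misses reported by functools.lru_cache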
| 706
|
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__a = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
__a = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
__a = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
__a = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
__a = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path, save_dir, config_update):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
__a = parser.parse_args()
__a = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
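# Worked example of the cumulative pattern replacement in
# rename_state_dict_key (the TF key below is made up for illustration):
#
#   rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
#
# applies "/"->".", "layer_"->"layers.", "kernel"->"weight", "pegasus"->"model",
# "attention.self"->"self_attn", and "query"->"q_proj" in sequence, yielding
#   "model.decoder.layers.0.self_attn.q_proj.weight"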
| 310
| 0
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.builder = Generator(cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
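# Hypothetical usage sketch of the public API this reader backs:
#   from datasets import Dataset
#
#   def gen():
#       for i in range(3):
#           yield {"id": i, "text": f"example {i}"}
#
#   ds = Dataset.from_generator(gen)  # routes through a generator input stream like the one above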
| 25
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a_ = get_logger()
a_ = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
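# Hypothetical usage sketch: a formatter like the one above is what
# `with_format("jax")` plugs in, so columns come back as jnp arrays.
#   import datasets
#   ds = datasets.Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
#   ds = ds.with_format("jax")
#   print(ds[0]["x"])  # a jnp array placed on the formatter's default device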
| 25
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
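# Minimal usage sketch: the attribute_map above lets generic code read
# `num_hidden_layers` while the config actually stores `n_layer`.
#   config = CTRLConfig(n_layer=2, n_head=2, n_embd=64)
#   assert config.num_hidden_layers == 2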
| 700
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020


@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)


@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBartTokenizer.from_pretrained(cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
from math import sqrt


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    # Handle 2 separately, then test odd candidates only
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
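# Quick sanity checks for the helpers above; the sixth prime is 13.
assert is_prime(2) and is_prime(3) and is_prime(13)
assert not is_prime(1) and not is_prime(9)
assert solution(6) == 13  # primes: 2, 3, 5, 7, 11, 13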
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
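# The module above registers its submodules lazily: nothing heavy is imported
# until an attribute is first accessed. A minimal standalone sketch of the same
# idea, using only the standard library (names here are illustrative):
import importlib
import types


class LazyNamespace(types.ModuleType):
    # Maps public attribute names to the absolute module that defines them.
    def __init__(self, name, import_map):
        super().__init__(name)
        self._import_map = import_map

    def __getattr__(self, attr):
        # Only called when normal lookup fails, i.e. on first access.
        module = importlib.import_module(self._import_map[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups are direct
        return value


# Usage sketch: json is only imported when `ns.loads` is first touched.
ns = LazyNamespace("demo", {"loads": "json", "sqrt": "math"})
print(ns.loads('{"a": 1}'), ns.sqrt(2.0))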
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
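# Example CLI invocation of the converter above (the script and output folder
# names here are illustrative):
#
#     python convert_vit_checkpoint.py --model_name dino_vitb16 \
#         --pytorch_dump_folder_path ./dino_vitb16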
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
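# A direct (non-CLI) invocation sketch of the helper above; the save
# directory name is illustrative:
#
#     download_wmt_dataset("ro", "en", dataset="wmt16", save_dir="wmt16-ro-en")
#
# which writes train/val/test .source and .target files under wmt16-ro-en/.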
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
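# Round-trip sanity check for the converters above:
assert roman_to_int("MCMXC") == 1990
assert int_to_roman(1990) == "MCMXC"
assert roman_to_int(int_to_roman(3549)) == 3549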
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
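# Composition sketch for the classes above (shown as comments because the
# definitions rely on the package's relative imports):
#
#     text_cfg = Pix2StructTextConfig()
#     vision_cfg = Pix2StructVisionConfig()
#     cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     assert cfg.text_config.hidden_size == 768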
"""simple docstring"""
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Return the minimum cost of travel passes covering all travel days."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
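# Worked example (LeetCode 983): with day passes costing 2, weekly 7 and
# monthly 15, travelling on days [1, 4, 6, 7, 8, 20] costs 11
# (day passes on days 1 and 20 plus a weekly pass starting day 4).
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11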
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
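# Usage sketch for the processor above (the checkpoint name is illustrative;
# any model whose processor pairs an auto image processor with an auto
# tokenizer would do):
#
#     from transformers import AutoProcessor
#     processor = AutoProcessor.from_pretrained("microsoft/git-base")
#     inputs = processor(text="a photo of", images=image, return_tensors="pt")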
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) with the forward (explicit) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
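# Example: dy/dx = y with y(0) = 1 has exact solution e^x, so the final value
# should approach e ~ 2.718 as step_size shrinks (coarse here):
y = explicit_euler(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
print(y[-1])  # ~2.7048 with this step size; forward Euler underestimates e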
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
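# Note: the two-pointer scan above assumes `nums` is sorted in ascending
# order; on unsorted input the returned indices are not meaningful.
assert two_pointer([2, 7, 11, 15], 9) == [0, 1]
assert two_pointer([2, 7, 11, 15], 100) == []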
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16,
        num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False,
        use_relative_position_bias=False, use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True,
        out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True,
        auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1,
        auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
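# Usage sketch: preprocessing a single PIL image into a (1, 3, 384, 384)
# tensor with the defaults above (the `pil_image` variable is illustrative):
#
#     processor = BlipImageProcessor()
#     batch = processor(images=pil_image, return_tensors="pt")
#     batch["pixel_values"].shape  # torch.Size([1, 3, 384, 384])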
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2,
        adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True,
        languages=("en_XX",), default_language=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300,
        encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8,
        decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False,
        position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True,
        dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1,
        dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5,
        giou_loss_coefficient=2, focal_alpha=0.25, **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: Any = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: Tuple = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
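
# The sys.modules swap above defers all heavy imports: attribute access on the
# package triggers the real submodule import. A rough standalone sketch of the
# idea (not the actual transformers._LazyModule implementation):
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{submodule}", self.__name__)
        return getattr(module, attr)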
| 700
|
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join([str(value) for value in self.rows[0]]) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError("Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError("Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError("Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix([[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError("A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError("Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
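
# Usage sketch for the Matrix class above (names per the restored API).
example = Matrix([[1, 2], [3, 4]])
other = Matrix([[5, 6], [7, 8]])
print(example.determinant())                 # -2
print(example * other)                       # [[19. 22.]
                                             #  [43. 50.]]
print((example ** 0) == example.identity())  # power 0 yields the identity: True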
| 225
| 0
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Return pi to `precision` digits via the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
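
# For reference: the loop above implements the Chudnovsky series, which yields
# roughly 14 new correct digits per term (hence ceil(precision / 14) iterations):
#
#     1/pi = 12 * sum_{k=0..inf} (-1)^k (6k)! (13591409 + 545140134*k)
#                 / ((3k)! (k!)^3 * 640320^(3k + 3/2))
#
# constant_term = 426880*sqrt(10005) equals 640320^(3/2) / 12, and
# exponential_term accumulates the factor -640320^3 = -262537412640768000.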
| 122
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
def __init__( self , _a , _a=2 , _a=True , _a=False , _a=10 , _a=3 , _a=32 * 8 , _a=32 * 8 , _a=4 , _a=64 , ) -> List[str]:
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_auxiliary_loss
lowerCAmelCase_ = num_queries
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = min_size
lowerCAmelCase_ = max_size
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = hidden_dim
lowerCAmelCase_ = hidden_dim
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
lowerCAmelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_a )
lowerCAmelCase_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_a ) > 0.5
).float()
lowerCAmelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_a ) > 0.5).long()
lowerCAmelCase_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __a ( self ) -> int:
lowerCAmelCase_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowerCAmelCase_ = self.num_queries
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = [1, 1, 1, 1]
lowerCAmelCase_ = self.num_channels
lowerCAmelCase_ = 64
lowerCAmelCase_ = 128
lowerCAmelCase_ = self.hidden_dim
lowerCAmelCase_ = self.hidden_dim
lowerCAmelCase_ = self.hidden_dim
return config
def __a ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def __a ( self , _a , _a ) -> Optional[Any]:
lowerCAmelCase_ = output.encoder_hidden_states
lowerCAmelCase_ = output.pixel_decoder_hidden_states
lowerCAmelCase_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , config.decoder_layers )
def __a ( self , _a , _a , _a , _a=False ) -> int:
with torch.no_grad():
lowerCAmelCase_ = MaskaFormerModel(config=_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(pixel_values=_a , pixel_mask=_a )
lowerCAmelCase_ = model(_a , output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a , _a )
def __a ( self , _a , _a , _a , _a , _a ) -> List[Any]:
lowerCAmelCase_ = MaskaFormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_a ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase_ = model(pixel_values=_a , pixel_mask=_a )
lowerCAmelCase_ = model(_a )
comm_check_on_output(_a )
lowerCAmelCase_ = model(
pixel_values=_a , pixel_mask=_a , mask_labels=_a , class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCamelCase__ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCamelCase__ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __a ( self ) -> Tuple:
lowerCAmelCase_ = MaskaFormerModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_a , has_text_modality=_a )
def __a ( self ) -> List[str]:
self.config_tester.run_common_tests()
def __a ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def __a ( self ) -> Any:
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def __a ( self ) -> Optional[int]:
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def __a ( self ) -> str:
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def __a ( self ) -> List[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __a ( self ) -> Any:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __a ( self ) -> List[str]:
pass
def __a ( self ) -> Any:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(_a )
lowerCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _a )
@slow
def __a ( self ) -> int:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowerCAmelCase_ = MaskaFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = (self.model_tester.min_size,) * 2
lowerCAmelCase_ = {
"pixel_values": torch.randn((2, 3, *size) , device=_a ),
"mask_labels": torch.randn((2, 10, *size) , device=_a ),
"class_labels": torch.zeros(2 , 10 , device=_a ).long(),
}
lowerCAmelCase_ = self.model_tester.get_config()
lowerCAmelCase_ = MaskaFormerForUniversalSegmentation(_a ).to(_a )
lowerCAmelCase_ = model(**_a )
self.assertTrue(outputs.loss is not None )
def __a ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(_a ).to(_a )
lowerCAmelCase_ = model(**_a , output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def __a ( self ) -> List[str]:
if not self.model_tester.is_training:
return
lowerCAmelCase_ = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ = model_class(_a )
model.to(_a )
model.train()
lowerCAmelCase_ = model(_a , mask_labels=_a , class_labels=_a ).loss
loss.backward()
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(_a ).to(_a )
model.train()
lowerCAmelCase_ = model(_a , mask_labels=_a , class_labels=_a )
lowerCAmelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowerCAmelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
@cached_property
def __a ( self ) -> Dict:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __a ( self ) -> Optional[Any]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __a ( self ) -> int:
lowerCAmelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(_a , return_tensors="pt" ).to(_a )
lowerCAmelCase_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a , (1, 3, 384, 384) )
with torch.no_grad():
lowerCAmelCase_ = model(**_a )
lowerCAmelCase_ = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
lowerCAmelCase_ = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
lowerCAmelCase_ = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _a , atol=_a ) )
def __a ( self ) -> str:
lowerCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(_a , return_tensors="pt" ).to(_a )
lowerCAmelCase_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a , (1, 3, 384, 384) )
with torch.no_grad():
lowerCAmelCase_ = model(**_a )
# masks_queries_logits
lowerCAmelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowerCAmelCase_ = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
lowerCAmelCase_ = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
# class_queries_logits
lowerCAmelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )
def __a ( self ) -> Tuple:
lowerCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors="pt" , )
lowerCAmelCase_ = inputs["pixel_values"].to(_a )
lowerCAmelCase_ = [el.to(_a ) for el in inputs["mask_labels"]]
lowerCAmelCase_ = [el.to(_a ) for el in inputs["class_labels"]]
with torch.no_grad():
lowerCAmelCase_ = model(**_a )
self.assertTrue(outputs.loss is not None )
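
# Not part of the test suite: a hedged end-to-end inference sketch using the
# real upstream class names (Mask2Former...) rather than the mangled aliases
# imported above. Runs only when executed directly.
if __name__ == "__main__":
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

    checkpoint = "facebook/mask2former-swin-small-coco-instance"
    processor = Mask2FormerImageProcessor.from_pretrained(checkpoint)
    model = Mask2FormerForUniversalSegmentation.from_pretrained(checkpoint).eval()

    image = prepare_img()  # the COCO fixture defined above
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # fold per-query masks and classes into a per-pixel instance map
    result = processor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    print(result["segmentation"].shape)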
| 122
| 1
|
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in non-decreasing order, with multiplicity."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
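
# A few concrete runs of the trial-division factorizer above:
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]  # 2**3 * 3**2 * 5
assert prime_factors(97) == [97]                 # a prime is its own factorization
assert prime_factors(1) == []                    # 1 has no prime factors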
| 715
|
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic sigmoid, mapping any real number into (0, 1)."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Mean binary cross-entropy between predictions h and labels y."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit weights by batch gradient descent on the cross-entropy cost."""
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the probability from the fitted logistic regression weights
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
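
# The update step in logistic_reg is batch gradient descent on the mean binary
# cross-entropy J(theta). For the sigmoid hypothesis h = sigmoid(X @ theta) the
# gradient collapses to a single matrix product:
#
#     grad J(theta) = (1/m) * X^T (h - y)
#
# which is exactly the `np.dot(x.T, h - y) / y.size` line above.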
| 496
| 0
|
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as raw bytes and return them as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress an LZW bit string, growing the lexicon as codes arrive."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            # code width grows: re-key existing entries with a leading zero
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack a bit string into bytes (with a stop marker) and write it out."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the unary size prefix written by the matching compressor."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read a compressed file, decompress it, and write out the result."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
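
# Invocation sketch (file names are placeholders). The source file must start
# with the unary size prefix that remove_prefix() strips, i.e. it should have
# been produced by the matching LZW compressor script:
#
#     python lzw_decompress.py compressed.lzw restored.txt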
| 90
|
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]
def __init__( self : Dict , _a : SpectrogramNotesEncoder , _a : SpectrogramContEncoder , _a : TaFilmDecoder , _a : DDPMScheduler , _a : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
super().__init__()
# From MELGAN
__lowerCamelCase : Any = math.log(1e-5 ) # Matches MelGAN training.
__lowerCamelCase : List[Any] = 4.0 # Largest value for most examples
__lowerCamelCase : Tuple = 128
self.register_modules(
notes_encoder=_a , continuous_encoder=_a , decoder=_a , scheduler=_a , melgan=_a , )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to a given range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert the scaling applied by `scale_features`."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
@torch.no_grad()
def __call__( self : Optional[int] , _a : List[List[int]] , _a : Optional[torch.Generator] = None , _a : int = 100 , _a : bool = True , _a : str = "numpy" , _a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _a : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_a , _a ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(_a )}.' )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
for i, encoder_input_tokens in enumerate(_a ):
if i == 0:
__lowerCamelCase : List[str] = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__lowerCamelCase : List[Any] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_a , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__lowerCamelCase : int = ones
__lowerCamelCase : int = self.scale_features(
_a , output_range=[-1.0, 1.0] , clip=_a )
__lowerCamelCase : Tuple = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_a , continuous_mask=_a , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__lowerCamelCase : Optional[int] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_a , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_a )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__lowerCamelCase : List[Any] = self.decode(
encodings_and_masks=_a , input_tokens=_a , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__lowerCamelCase : Optional[int] = self.scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__lowerCamelCase : List[Any] = self.scale_to_features(_a , input_range=[-1.0, 1.0] )
__lowerCamelCase : Union[str, Any] = mel[:1]
__lowerCamelCase : Union[str, Any] = mel.cpu().float().numpy()
__lowerCamelCase : Dict = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_a , _a )
logger.info('Generated segment' , _a )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )
if output_type == "numpy":
__lowerCamelCase : Tuple = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__lowerCamelCase : List[str] = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_a )
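
# Standalone version of the linear range mapping used by scale_features /
# scale_to_features above (source and destination ranges are placeholders):
def rescale(x, src=(-100.0, 4.0), dst=(-1.0, 1.0)):
    # normalize to [0, 1], then stretch to the destination range
    zero_one = (x - src[0]) / (src[1] - src[0])
    return zero_one * (dst[1] - dst[0]) + dst[0]


assert rescale(4.0) == 1.0
assert rescale(-100.0) == -1.0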
| 459
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """The variance-preserving stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
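
# Hedged usage sketch for the scheduler above: one reverse-SDE predictor step
# per timestep. The score network is faked with `-sample` (the score of a
# standard Gaussian), just so the loop is self-contained and runnable.
if __name__ == "__main__":
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(num_inference_steps=100)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        score = -sample  # placeholder for a trained score model's output
        sample, sample_mean = scheduler.step_pred(score, sample, t)
    print(sample_mean.shape)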
| 718
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673
| 0
|
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    """Convert a PIL image (or list of PIL images) to a normalized torch tensor."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two (numpy or torch) vectors."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly colinear: fall back to plain lerp
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
def spherical_dist_loss(x, y):
    """Geodesic distance on the unit sphere between L2-normalized embeddings."""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    """Toggle requires_grad on every parameter of `model`."""
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a=None , _a=None , _a=None , ):
super().__init__()
self.register_modules(
vae=_a , text_encoder=_a , clip_model=_a , tokenizer=_a , unet=_a , scheduler=_a , feature_extractor=_a , coca_model=_a , coca_tokenizer=_a , coca_transform=_a , )
__magic_name__ : Dict = (
feature_extractor.size
if isinstance(feature_extractor.size , _a )
else feature_extractor.size["shortest_edge"]
)
__magic_name__ : int = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _a )
set_requires_grad(self.clip_model , _a )
def SCREAMING_SNAKE_CASE ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__magic_name__ : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def SCREAMING_SNAKE_CASE ( self ):
self.enable_attention_slicing(_a )
def SCREAMING_SNAKE_CASE ( self ):
set_requires_grad(self.vae , _a )
def SCREAMING_SNAKE_CASE ( self ):
set_requires_grad(self.vae , _a )
def SCREAMING_SNAKE_CASE ( self ):
set_requires_grad(self.unet , _a )
def SCREAMING_SNAKE_CASE ( self ):
set_requires_grad(self.unet , _a )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
# get the original timestep using init_timestep
__magic_name__ : Optional[Any] = min(int(num_inference_steps * strength ) , _a )
__magic_name__ : Any = max(num_inference_steps - init_timestep , 0 )
__magic_name__ : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a=None ):
if not isinstance(_a , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(_a )}''' )
__magic_name__ : int = image.to(device=_a , dtype=_a )
if isinstance(_a , _a ):
__magic_name__ : Union[str, Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_a )
]
__magic_name__ : Any = torch.cat(_a , dim=0 )
else:
__magic_name__ : str = self.vae.encode(_a ).latent_dist.sample(_a )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__magic_name__ : int = 0.1_82_15 * init_latents
__magic_name__ : List[Any] = init_latents.repeat_interleave(_a , dim=0 )
__magic_name__ : List[str] = randn_tensor(init_latents.shape , generator=_a , device=_a , dtype=_a )
# get latents
__magic_name__ : List[str] = self.scheduler.add_noise(_a , _a , _a )
__magic_name__ : List[str] = init_latents
return latents
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Dict = self.coca_transform(_a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__magic_name__ : List[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__magic_name__ : Any = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def SCREAMING_SNAKE_CASE ( self , _a , _a ):
__magic_name__ : int = self.feature_extractor.preprocess(_a )
__magic_name__ : Union[str, Any] = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
__magic_name__ : List[str] = self.clip_model.get_image_features(_a )
__magic_name__ : Dict = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
__magic_name__ : List[str] = image_embeddings_clip.repeat_interleave(_a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a , ):
__magic_name__ : Dict = latents.detach().requires_grad_()
__magic_name__ : int = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__magic_name__ : Optional[Any] = self.unet(_a , _a , encoder_hidden_states=_a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__magic_name__ : Union[str, Any] = self.scheduler.alphas_cumprod[timestep]
__magic_name__ : Union[str, Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__magic_name__ : Union[str, Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__magic_name__ : List[Any] = torch.sqrt(_a )
__magic_name__ : Optional[Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _a ):
__magic_name__ : Any = self.scheduler.sigmas[index]
__magic_name__ : Any = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__magic_name__ : List[Any] = 1 / 0.1_82_15 * sample
__magic_name__ : Optional[Any] = self.vae.decode(_a ).sample
__magic_name__ : int = (image / 2 + 0.5).clamp(0 , 1 )
__magic_name__ : Optional[int] = transforms.Resize(self.feature_extractor_size )(_a )
__magic_name__ : Tuple = self.normalize(_a ).to(latents.dtype )
__magic_name__ : List[Any] = self.clip_model.get_image_features(_a )
__magic_name__ : List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
__magic_name__ : str = spherical_dist_loss(_a , _a ).mean() * clip_guidance_scale
__magic_name__ : List[str] = -torch.autograd.grad(_a , _a )[0]
if isinstance(self.scheduler , _a ):
__magic_name__ : Any = latents.detach() + grads * (sigma**2)
__magic_name__ : Optional[int] = noise_pred_original
else:
__magic_name__ : Optional[Any] = noise_pred_original - torch.sqrt(_a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , _a , _a , _a = None , _a = None , _a = 512 , _a = 512 , _a = 0.6 , _a = 50 , _a = 7.5 , _a = 1 , _a = 0.0 , _a = 100 , _a = None , _a = "pil" , _a = True , _a = 0.8 , _a = 0.1 , _a = 0.1 , ):
if isinstance(_a , _a ) and len(_a ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(_a )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(_a , torch.Generator ) and batch_size > 1:
__magic_name__ : List[Any] = [generator] + [None] * (batch_size - 1)
__magic_name__ : Any = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
__magic_name__ : Optional[Any] = [x[0] for x in coca_is_none if x[1]]
__magic_name__ : str = ", ".join(_a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_a ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__magic_name__ : int = self.get_image_description(_a )
if style_prompt is None:
if len(_a ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__magic_name__ : str = self.get_image_description(_a )
# get prompt text embeddings for content and style
__magic_name__ : Optional[int] = self.tokenizer(
_a , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors="pt" , )
__magic_name__ : Optional[int] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__magic_name__ : Tuple = self.tokenizer(
_a , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors="pt" , )
__magic_name__ : Optional[Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__magic_name__ : Optional[Any] = slerp(_a , _a , _a )
# duplicate text embeddings for each generation per prompt
__magic_name__ : Union[str, Any] = text_embeddings.repeat_interleave(_a , dim=0 )
# set timesteps
__magic_name__ : Union[str, Any] = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__magic_name__ : Dict = {}
if accepts_offset:
__magic_name__ : int = 1
self.scheduler.set_timesteps(_a , **_a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__magic_name__ , __magic_name__ : List[str] = self.get_timesteps(_a , _a , self.device )
__magic_name__ : Optional[Any] = timesteps[:1].repeat(_a )
# Preprocess image
__magic_name__ : int = preprocess(_a , _a , _a )
__magic_name__ : Tuple = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
__magic_name__ : Any = preprocess(_a , _a , _a )
__magic_name__ : Dict = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
__magic_name__ : Optional[Any] = slerp(_a , _a , _a )
if clip_guidance_scale > 0:
__magic_name__ : Dict = self.get_clip_image_embeddings(_a , _a )
__magic_name__ : Optional[Any] = self.get_clip_image_embeddings(_a , _a )
__magic_name__ : List[Any] = slerp(
_a , _a , _a )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__magic_name__ : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__magic_name__ : str = content_text_input.input_ids.shape[-1]
__magic_name__ : Optional[Any] = self.tokenizer([""] , padding="max_length" , max_length=_a , return_tensors="pt" )
__magic_name__ : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__magic_name__ : Union[str, Any] = uncond_embeddings.repeat_interleave(_a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__magic_name__ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__magic_name__ : Any = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__magic_name__ : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__magic_name__ : Tuple = torch.randn(_a , generator=_a , device="cpu" , dtype=_a ).to(
self.device )
else:
__magic_name__ : Tuple = torch.randn(_a , generator=_a , device=self.device , dtype=_a )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__magic_name__ : str = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__magic_name__ : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__magic_name__ : str = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__magic_name__ : Tuple = {}
if accepts_eta:
__magic_name__ : List[Any] = eta
# check if the scheduler accepts generator
__magic_name__ : int = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__magic_name__ : Tuple = generator
with self.progress_bar(total=_a ):
for i, t in enumerate(_a ):
# expand the latents if we are doing classifier free guidance
__magic_name__ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__magic_name__ : Optional[int] = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__magic_name__ : Optional[int] = self.unet(_a , _a , encoder_hidden_states=_a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__magic_name__ , __magic_name__ : Union[str, Any] = noise_pred.chunk(2 )
__magic_name__ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__magic_name__ : str = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__magic_name__ , __magic_name__ : str = self.cond_fn(
_a , _a , _a , _a , _a , _a , _a , )
# compute the previous noisy sample x_t -> x_t-1
__magic_name__ : Optional[int] = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__magic_name__ : List[Any] = 1 / 0.1_82_15 * latents
__magic_name__ : Optional[int] = self.vae.decode(_a ).sample
__magic_name__ : int = (image / 2 + 0.5).clamp(0 , 1 )
__magic_name__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__magic_name__ : str = self.numpy_to_pil(_a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a )
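
# Quick check of the slerp helper above: spherical interpolation preserves the
# norm of unit vectors, whereas a plain lerp midpoint would have norm ~0.707.
if __name__ == "__main__":
    v0 = np.array([1.0, 0.0])
    v1 = np.array([0.0, 1.0])
    mid = slerp(0.5, v0, v1)
    print(np.linalg.norm(mid))  # ~1.0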
| 124
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
# Used for whitespace normalization in input texts
# fmt: off
__magic_name__ : Any = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__magic_name__ : Tuple = re.compile(
f'''[{"".join(map(_a , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]''' )
def __getstate__( self ):
__magic_name__ : str = self.__dict__.copy()
__magic_name__ : str = None
return state
def __setstate__( self , _a ):
__magic_name__ : Tuple = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__magic_name__ : int = {}
__magic_name__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def SCREAMING_SNAKE_CASE ( self ):
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Union[str, Any] = self.non_printing_characters_re.sub("" , _a )
# Normalize whitespaces
__magic_name__ : Any = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
__magic_name__ : Dict = unicodedata.normalize("NFC" , _a )
return text
def SCREAMING_SNAKE_CASE ( self , _a , **_a ):
__magic_name__ : str = self.preprocess_text(_a )
return self.sp_model.encode(_a , out_type=_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.sp_model.PieceToId(_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.sp_model.IdToPiece(_a )
@staticmethod
def SCREAMING_SNAKE_CASE ( _a ):
return _a
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Union[str, Any] = []
__magic_name__ : Optional[int] = ""
__magic_name__ : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
__magic_name__ : Optional[Any] = True
__magic_name__ : List[Any] = []
else:
current_sub_tokens.append(_a )
__magic_name__ : str = False
out_string += self.sp_model.decode(_a )
return out_string
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ : Dict = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , "wb" ) as fi:
__magic_name__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE ( self , _a , _a = False ):
if isinstance(_a , _a ):
__magic_name__ : Optional[Any] = self.preprocess_text(_a )
__magic_name__ : Any = self.sp_model.encode(_a )
else:
__magic_name__ : Union[str, Any] = [self.preprocess_text(_a ) for t in text]
__magic_name__ : List[str] = self.sp_model.encode(_a )
if return_tensors is True or return_tensors == "pt":
__magic_name__ : Any = torch.tensor(_a )
return token_ids
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.sp_model.decode(_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Optional[Any] = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
__magic_name__ : Union[str, Any] = (
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(_a ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=_a )
| 124
| 1
|
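The tokenizer above pipes every input through preprocess_text: strip non-printing characters, collapse exotic whitespace, then NFC-normalize. A self-contained sketch of the same three steps; the whitespace set here is an illustrative subset, not the tokenizer's exact one:

import re
import unicodedata

non_printing_re = re.compile(
    f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8_203]))}]"
)
whitespaces = {"\u2000", "\u2003", "\u2009", "\u202f"}  # assumed subset of Unicode spaces

def preprocess_text(text: str) -> str:
    text = non_printing_re.sub("", text)                                 # drop control characters
    text = "".join(ch if ch not in whitespaces else " " for ch in text)  # normalize whitespace
    return unicodedata.normalize("NFC", text)                            # compose accents

print(preprocess_text("cafe\u0301\x01 ok\u2003done"))  # 'café ok done'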
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def a_ ( _A ) -> Dict:
"""simple docstring"""
snake_case__ = filter(lambda _A : p.requires_grad , model.parameters() )
snake_case__ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__UpperCamelCase : Any = logging.getLogger(__name__)
def a_ ( _A , _A ) -> Optional[int]:
"""simple docstring"""
if metric == "rouge2":
snake_case__ = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
snake_case__ = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
snake_case__ = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
snake_case__ = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding'''
' to this function.' )
snake_case__ = ModelCheckpoint(
dirpath=_A , filename=_A , monitor=f'''val_{metric}''' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def a_ ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
return EarlyStopping(
monitor=f'''val_{metric}''' , mode='min' if 'loss' in metric else 'max' , patience=_A , verbose=_A , )
class __SCREAMING_SNAKE_CASE( pl.Callback ):
def lowerCAmelCase_ ( self: Any , UpperCamelCase: List[Any] , UpperCamelCase: Tuple ) -> Union[str, Any]:
snake_case__ = {F'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(UpperCamelCase )
@rank_zero_only
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: pl.Trainer , UpperCamelCase: pl.LightningModule , UpperCamelCase: str , UpperCamelCase: Optional[Any]=True ) -> None:
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
snake_case__ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
snake_case__ = Path(pl_module.hparams.output_dir )
if type_path == "test":
snake_case__ = od / 'test_results.txt'
snake_case__ = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
snake_case__ = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
snake_case__ = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=UpperCamelCase )
generations_file.parent.mkdir(exist_ok=UpperCamelCase )
with open(UpperCamelCase , 'a+' ) as writer:
for key in sorted(UpperCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
snake_case__ = metrics[key]
if isinstance(UpperCamelCase , torch.Tensor ):
snake_case__ = val.item()
snake_case__ = F'''{key}: {val:.6f}\n'''
writer.write(UpperCamelCase )
if not save_generations:
return
if "preds" in metrics:
snake_case__ = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(UpperCamelCase )
@rank_zero_only
def lowerCAmelCase_ ( self: str , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any] ) -> Optional[Any]:
try:
snake_case__ = pl_module.model.model.num_parameters()
except AttributeError:
snake_case__ = pl_module.model.num_parameters()
snake_case__ = count_trainable_parameters(UpperCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
@rank_zero_only
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: pl.Trainer , UpperCamelCase: pl.LightningModule ) -> Dict:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(UpperCamelCase , UpperCamelCase , 'test' )
@rank_zero_only
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: pl.Trainer , UpperCamelCase: Union[str, Any] ) -> Optional[int]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 372
|
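The callback factories above wrap pytorch_lightning's ModelCheckpoint and EarlyStopping. A hedged usage sketch; the directory, patience, and metric values below are made up for illustration:

from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

checkpoint = ModelCheckpoint(
    dirpath="checkpoints/",                        # assumed output directory
    filename="{val_avg_rouge2:.4f}-{step_count}",  # same pattern as the rouge2 branch above
    monitor="val_rouge2",
    mode="max",
    save_top_k=1,
    every_n_epochs=1,
)
early_stop = EarlyStopping(monitor="val_rouge2", mode="max", patience=3, verbose=True)
# trainer = pl.Trainer(callbacks=[checkpoint, early_stop], ...)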
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : int = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = "roformer"
def __init__( self: Tuple , UpperCamelCase: Optional[Any]=5_00_00 , UpperCamelCase: str=None , UpperCamelCase: Any=7_68 , UpperCamelCase: Dict=12 , UpperCamelCase: List[Any]=12 , UpperCamelCase: List[str]=30_72 , UpperCamelCase: int="gelu" , UpperCamelCase: str=0.1 , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: Any=15_36 , UpperCamelCase: Dict=2 , UpperCamelCase: Dict=0.02 , UpperCamelCase: List[str]=1e-12 , UpperCamelCase: int=0 , UpperCamelCase: Any=False , UpperCamelCase: int=True , **UpperCamelCase: List[Any] , ) -> List[str]:
super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
snake_case__ = vocab_size
snake_case__ = hidden_size if embedding_size is None else embedding_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = hidden_act
snake_case__ = intermediate_size
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = type_vocab_size
snake_case__ = initializer_range
snake_case__ = layer_norm_eps
snake_case__ = rotary_value
snake_case__ = use_cache
class __SCREAMING_SNAKE_CASE( a_ ):
@property
def lowerCAmelCase_ ( self: Any ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case__ = {0: 'batch', 1: 'sequence'}
snake_case__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 372
| 1
|
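The OnnxConfig above only varies the dynamic-axes dict by task. The same mapping in plain Python, runnable without transformers:

from collections import OrderedDict

def onnx_inputs(task: str) -> OrderedDict:
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [
            ("input_ids", dynamic_axis),
            ("attention_mask", dynamic_axis),
            ("token_type_ids", dynamic_axis),
        ]
    )

print(onnx_inputs("multiple-choice")["input_ids"])  # {0: 'batch', 1: 'choice', 2: 'sequence'}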
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__A : List[Any] = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = question_encoder
SCREAMING_SNAKE_CASE = generator
SCREAMING_SNAKE_CASE = self.question_encoder
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : List[str] ):
if os.path.isfile(__lowerCamelCase ):
raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , "question_encoder_tokenizer" )
SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , "generator_tokenizer" )
self.question_encoder.save_pretrained(__lowerCamelCase )
self.generator.save_pretrained(__lowerCamelCase )
@classmethod
def _snake_case ( cls : Tuple , __lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
SCREAMING_SNAKE_CASE = kwargs.pop("config" , __lowerCamelCase )
if config is None:
SCREAMING_SNAKE_CASE = RagConfig.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
__lowerCamelCase , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
__lowerCamelCase , config=config.generator , subfolder="generator_tokenizer" )
return cls(question_encoder=__lowerCamelCase , generator=__lowerCamelCase )
def __call__( self : int , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Any ):
return self.current_tokenizer(*__lowerCamelCase , **__lowerCamelCase )
def _snake_case ( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : Union[str, Any] ):
return self.generator.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _snake_case ( self : List[str] , *__lowerCamelCase : Any , **__lowerCamelCase : Optional[int] ):
return self.generator.decode(*__lowerCamelCase , **__lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.question_encoder
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = self.generator
def _snake_case ( self : str , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[List[str]] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : str = "longest" , __lowerCamelCase : str = None , __lowerCamelCase : bool = True , **__lowerCamelCase : List[str] , ):
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details" , __lowerCamelCase , )
if max_length is None:
SCREAMING_SNAKE_CASE = self.current_tokenizer.model_max_length
SCREAMING_SNAKE_CASE = self(
__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , max_length=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , **__lowerCamelCase , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
SCREAMING_SNAKE_CASE = self.current_tokenizer.model_max_length
SCREAMING_SNAKE_CASE = self(
text_target=__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE = labels["input_ids"]
return model_inputs
| 16
|
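The RAG tokenizer above is essentially a switchable pair: __call__ delegates to whichever side is current. A toy sketch of that pattern with stand-in callables (str.upper and str.lower are placeholders, not real tokenizers):

class DualTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = question_encoder  # question encoder is the default

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def as_target_tokenizer(self):
        self.current_tokenizer = self.generator

    def as_input_tokenizer(self):
        self.current_tokenizer = self.question_encoder

dual = DualTokenizer(str.upper, str.lower)
print(dual("Hello"))   # 'HELLO' via the question-encoder side
dual.as_target_tokenizer()
print(dual("Hello"))   # 'hello' via the generator side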
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( lowerCamelCase_ : list , lowerCamelCase_ : int | None = None , lowerCamelCase_ : int | None = None ):
if start is None:
__lowercase = 0
if end is None:
__lowercase = len(lowerCamelCase_ ) - 1
if start >= end:
return
__lowercase = (start + end) // 2
slowsort(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
slowsort(lowerCamelCase_ , mid + 1 , lowerCamelCase_ )
if sequence[end] < sequence[mid]:
__lowercase , __lowercase = sequence[mid], sequence[end]
slowsort(lowerCamelCase_ , lowerCamelCase_ , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 502
| 0
|
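A self-contained restatement of the slowsort above (same divide, recurse, swap, re-sort structure) so the recursion runs end to end, plus a quick check:

def slowsort(seq, start=0, end=None):
    if end is None:
        end = len(seq) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(seq, start, mid)         # sort left half
    slowsort(seq, mid + 1, end)       # sort right half
    if seq[end] < seq[mid]:
        seq[mid], seq[end] = seq[end], seq[mid]  # put the larger maximum at the end
    slowsort(seq, start, end - 1)     # sort everything but the last element

data = [5, 3, 8, 1, 9, 2]
slowsort(data)
print(data)  # [1, 2, 3, 5, 8, 9]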
'''simple docstring'''
from __future__ import annotations
import requests
__snake_case: Optional[int] = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def _snake_case ( A_ : str , A_ : int = 1 , A_ : str = "new" , A_ : list | None = None ):
"""simple docstring"""
a_ : List[str] = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(A_ ) - valid_terms ) ):
a_ : Union[str, Any] = f'''Invalid search term: {invalid_search_terms}'''
raise ValueError(A_ )
a_ : Any = requests.get(
f'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={"""User-agent""": """A random string"""} , )
if response.status_code == 429:
raise requests.HTTPError
a_ : Any = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(A_ )}
a_ : str = {}
for id_ in range(A_ ):
a_ : Tuple = {
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited. Try again after some time
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 460
|
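The wanted_data filtering in get_subreddit_data above can be exercised offline with a canned payload shaped like reddit's JSON (the payload below is made up):

canned = {"data": {"children": [{"data": {"title": "t0", "url": "u0", "selftext": "s0", "ups": 3}}]}}
wanted = ["title", "url", "selftext"]
data_dict = {
    id_: {item: canned["data"]["children"][id_]["data"][item] for item in wanted}
    for id_ in range(1)  # one post in the canned response
}
print(data_dict)  # {0: {'title': 't0', 'url': 'u0', 'selftext': 's0'}}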
'''simple docstring'''
def _snake_case ( A_ : int , A_ : Optional[Any] , A_ : Optional[int] , A_ : Dict ):
"""simple docstring"""
a_ : Optional[int] = [False] * len(A_ )
a_ : List[str] = []
queue.append(A_ )
a_ : Union[str, Any] = True
while queue:
a_ : List[Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(A_ )
a_ : Any = True
a_ : List[Any] = u
return visited[t]
def _snake_case ( A_ : Union[str, Any] , A_ : Union[str, Any] , A_ : List[Any] ):
"""simple docstring"""
a_ : Dict = [-1] * (len(A_ ))
a_ : List[str] = 0
while bfs(A_ , A_ , A_ , A_ ):
a_ : Any = float("""Inf""" )
a_ : Any = sink
while s != source:
# Find the minimum value in select path
a_ : Any = min(A_ , graph[parent[s]][s] )
a_ : List[Any] = parent[s]
max_flow += path_flow
a_ : Tuple = sink
while v != source:
a_ : Tuple = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
a_ : List[Any] = parent[v]
return max_flow
__snake_case: List[str] = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__snake_case ,__snake_case: int = 0, 5
print(ford_fulkerson(graph, source, sink))
| 460
| 1
|
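The Ford-Fulkerson above finds augmenting paths with BFS (the Edmonds-Karp variant). A self-contained version over the same capacity matrix; this classic CLRS network has maximum flow 23:

from collections import deque

def max_flow(capacity, source, sink):
    n = len(capacity)
    graph = [row[:] for row in capacity]  # residual capacities, copied so the input survives
    flow = 0
    while True:
        parent = [-1] * n
        parent[source] = source
        q = deque([source])
        while q and parent[sink] == -1:   # BFS for a shortest augmenting path
            u = q.popleft()
            for v in range(n):
                if parent[v] == -1 and graph[u][v] > 0:
                    parent[v] = u
                    q.append(v)
        if parent[sink] == -1:            # no augmenting path left
            return flow
        path_flow, v = float("inf"), sink  # bottleneck capacity along the path
        while v != source:
            path_flow = min(path_flow, graph[parent[v]][v])
            v = parent[v]
        v = sink                           # update residual capacities
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = u
        flow += path_flow

graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
print(max_flow(graph, 0, 5))  # 23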
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
A_ = u
for i in range(1, UpperCAmelCase__ ):
A_ = temp * (u - i)
return temp
def UpperCAmelCase__ ( ) -> None:
A_ = int(input("""enter the numbers of values: """ ) )
A_ = []
for _ in range(UpperCAmelCase__ ):
y.append([] )
for i in range(UpperCAmelCase__ ):
for j in range(UpperCAmelCase__ ):
y[i].append(UpperCAmelCase__ )
A_ = 0
print("""enter the values of parameters in a list: """ )
A_ = list(map(UpperCAmelCase__, input().split() ) )
print("""enter the values of corresponding parameters: """ )
for i in range(UpperCAmelCase__ ):
A_ = float(input() )
A_ = int(input("""enter the value to interpolate: """ ) )
A_ = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1, UpperCAmelCase__ ):
for j in range(n - i ):
A_ = y[j + 1][i - 1] - y[j][i - 1]
A_ = y[0][0]
for i in range(1, UpperCAmelCase__ ):
summ += (ucal(UpperCAmelCase__, UpperCAmelCase__ ) * y[0][i]) / math.factorial(UpperCAmelCase__ )
print(F'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
| 288
|
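A non-interactive sketch of the Newton forward-difference interpolation above, on made-up equally spaced samples of f(x) = x**3 (four points reproduce a cubic exactly):

import math

x = [0.0, 1.0, 2.0, 3.0]
y = [[xi**3] + [0.0] * (len(x) - 1) for xi in x]  # first column holds the f values

n = len(x)
for i in range(1, n):                 # fill the forward-difference table
    for j in range(n - i):
        y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

value = 1.5
u = (value - x[0]) / (x[1] - x[0])

def ucal(u, p):
    temp = u
    for i in range(1, p):
        temp *= u - i
    return temp

summ = y[0][0]
for i in range(1, n):
    summ += ucal(u, i) * y[0][i] / math.factorial(i)

print(summ)  # 3.375 == 1.5 ** 3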
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
__lowerCamelCase = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
continue
item.add_marker(pytest.mark.unit )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
config.addinivalue_line("""markers""", """torchaudio_latest: mark test to run with torchaudio>=0.12""" )
@pytest.fixture(autouse=UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> int:
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
A_ = tmp_path_factory.getbasetemp() / """cache"""
A_ = test_hf_cache_home / """datasets"""
A_ = test_hf_cache_home / """metrics"""
A_ = test_hf_cache_home / """modules"""
monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""", str(UpperCAmelCase__ ) )
monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""", str(UpperCAmelCase__ ) )
monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""", str(UpperCAmelCase__ ) )
A_ = test_hf_datasets_cache / """downloads"""
monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""", str(UpperCAmelCase__ ) )
A_ = test_hf_datasets_cache / """downloads""" / """extracted"""
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""", str(UpperCAmelCase__ ) )
@pytest.fixture(autouse=UpperCAmelCase__, scope="""session""" )
def UpperCAmelCase__ ( ) -> str:
datasets.disable_progress_bar()
@pytest.fixture(autouse=UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
# don't take tests into account when counting downloads
monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""", UpperCAmelCase__ )
@pytest.fixture
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""", UpperCAmelCase__ )
| 288
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class _A ( UpperCamelCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(lowerCamelCase , """num_attention_heads""" ) )
self.parent.assertTrue(hasattr(lowerCamelCase , """num_encoder_blocks""" ) )
class _A :
'''simple docstring'''
def __init__( self : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any]=13 , lowerCamelCase : Union[str, Any]=64 , lowerCamelCase : Union[str, Any]=3 , lowerCamelCase : List[Any]=4 , lowerCamelCase : Optional[int]=[2, 2, 2, 2] , lowerCamelCase : Union[str, Any]=[8, 4, 2, 1] , lowerCamelCase : Tuple=[16, 32, 64, 128] , lowerCamelCase : Any=[1, 4, 8, 16] , lowerCamelCase : Any=[1, 2, 4, 8] , lowerCamelCase : Dict=True , lowerCamelCase : List[str]=True , lowerCamelCase : Dict="gelu" , lowerCamelCase : Dict=0.1 , lowerCamelCase : int=0.1 , lowerCamelCase : Dict=0.02 , lowerCamelCase : Union[str, Any]=3 , lowerCamelCase : List[str]=None , )-> Optional[int]:
snake_case__ : List[Any] = parent
snake_case__ : List[Any] = batch_size
snake_case__ : Optional[Any] = image_size
snake_case__ : str = num_channels
snake_case__ : int = num_encoder_blocks
snake_case__ : List[str] = sr_ratios
snake_case__ : List[Any] = depths
snake_case__ : str = hidden_sizes
snake_case__ : int = downsampling_rates
snake_case__ : str = num_attention_heads
snake_case__ : Optional[int] = is_training
snake_case__ : Union[str, Any] = use_labels
snake_case__ : Optional[int] = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Dict = num_labels
snake_case__ : Dict = scope
def __lowerCAmelCase ( self : Dict )-> Optional[Any]:
snake_case__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Optional[Any] = None
if self.use_labels:
snake_case__ : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : Any )-> Optional[Any]:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self : Any , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : int )-> str:
snake_case__ : Optional[Any] = SegformerModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Optional[int] = model(lowerCamelCase )
snake_case__ : Dict = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def __lowerCAmelCase ( self : Dict , lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : Dict )-> List[Any]:
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : Union[str, Any] = SegformerForSemanticSegmentation(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Tuple = model(lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
snake_case__ : Union[str, Any] = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def __lowerCAmelCase ( self : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : int )-> Union[str, Any]:
snake_case__ : Optional[int] = 1
snake_case__ : List[Any] = SegformerForSemanticSegmentation(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Any = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(lowerCamelCase )
snake_case__ : List[str] = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertGreater(result.loss , 0.0 )
def __lowerCAmelCase ( self : Optional[Any] )-> List[str]:
snake_case__ : Any = self.prepare_config_and_inputs()
snake_case__ : List[Any] = config_and_inputs
snake_case__ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
_lowercase = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
_lowercase = (
{
'feature-extraction': SegformerModel,
'image-classification': SegformerForImageClassification,
'image-segmentation': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowercase = True
_lowercase = False
_lowercase = False
_lowercase = False
def __lowerCAmelCase ( self : Dict )-> str:
snake_case__ : Optional[Any] = SegformerModelTester(self )
snake_case__ : Optional[int] = SegformerConfigTester(self , config_class=lowerCamelCase )
def __lowerCAmelCase ( self : Dict )-> int:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Tuple )-> List[str]:
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __lowerCAmelCase ( self : str )-> Tuple:
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*lowerCamelCase )
def __lowerCAmelCase ( self : Optional[Any] )-> int:
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*lowerCamelCase )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def __lowerCAmelCase ( self : List[Any] )-> int:
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def __lowerCAmelCase ( self : List[Any] )-> List[str]:
pass
def __lowerCAmelCase ( self : str )-> List[str]:
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(lowerCamelCase )
snake_case__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Tuple = [*signature.parameters.keys()]
snake_case__ : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def __lowerCAmelCase ( self : List[str] )-> int:
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = True
for model_class in self.all_model_classes:
snake_case__ : Union[str, Any] = True
snake_case__ : int = False
snake_case__ : List[str] = True
snake_case__ : Dict = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
snake_case__ : Optional[int] = outputs.attentions
snake_case__ : Union[str, Any] = sum(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case__ : Union[str, Any] = True
snake_case__ : List[Any] = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : str = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
snake_case__ : Union[str, Any] = outputs.attentions
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# verify the first attentions (first block, first layer)
snake_case__ : int = (self.model_tester.image_size // 4) ** 2
snake_case__ : int = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
snake_case__ : int = (self.model_tester.image_size // 32) ** 2
snake_case__ : List[str] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
snake_case__ : List[Any] = len(lowerCamelCase )
# Check attention is always last and order is fine
snake_case__ : Optional[Any] = True
snake_case__ : Any = True
snake_case__ : Tuple = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : Any = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(out_len + 1 , len(lowerCamelCase ) )
snake_case__ : List[str] = outputs.attentions
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# verify the first attentions (first block, first layer)
snake_case__ : str = (self.model_tester.image_size // 4) ** 2
snake_case__ : Tuple = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def __lowerCAmelCase ( self : Any )-> List[str]:
def check_hidden_states_output(lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : List[str] ):
snake_case__ : Union[str, Any] = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : List[Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
snake_case__ : Any = outputs.hidden_states
snake_case__ : Optional[int] = self.model_tester.num_encoder_blocks
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : int = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : Dict = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
if not self.model_tester.is_training:
return
snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase ):
continue
snake_case__ : Tuple = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
snake_case__ : Any = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
snake_case__ : Optional[Any] = model(**lowerCamelCase ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCAmelCase ( self : int )-> Union[str, Any]:
pass
@slow
def __lowerCAmelCase ( self : int )-> Union[str, Any]:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : List[str] = SegformerModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def lowerCAmelCase__ ( ):
"""simple docstring"""
snake_case__ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class _A ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : str )-> List[Any]:
snake_case__ : Union[str, Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowerCamelCase , align=lowerCamelCase , do_random_crop=lowerCamelCase )
snake_case__ : Optional[Any] = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
lowerCamelCase )
snake_case__ : str = prepare_img()
snake_case__ : Tuple = image_processor(images=lowerCamelCase , return_tensors="""pt""" )
snake_case__ : List[str] = encoded_inputs.pixel_values.to(lowerCamelCase )
with torch.no_grad():
snake_case__ : Optional[Any] = model(lowerCamelCase )
snake_case__ : Union[str, Any] = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
snake_case__ : List[Any] = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCamelCase , atol=1e-4 ) )
@slow
def __lowerCAmelCase ( self : Optional[int] )-> Dict:
snake_case__ : List[str] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowerCamelCase , align=lowerCamelCase , do_random_crop=lowerCamelCase )
snake_case__ : int = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(lowerCamelCase )
snake_case__ : Dict = prepare_img()
snake_case__ : List[Any] = image_processor(images=lowerCamelCase , return_tensors="""pt""" )
snake_case__ : Any = encoded_inputs.pixel_values.to(lowerCamelCase )
with torch.no_grad():
snake_case__ : Optional[Any] = model(lowerCamelCase )
snake_case__ : Optional[Any] = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
snake_case__ : Optional[Any] = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCamelCase , atol=1e-1 ) )
@slow
def __lowerCAmelCase ( self : Optional[Any] )-> Dict:
snake_case__ : str = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowerCamelCase , align=lowerCamelCase , do_random_crop=lowerCamelCase )
snake_case__ : Optional[int] = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
lowerCamelCase )
snake_case__ : Tuple = prepare_img()
snake_case__ : Dict = image_processor(images=lowerCamelCase , return_tensors="""pt""" )
snake_case__ : Any = encoded_inputs.pixel_values.to(lowerCamelCase )
with torch.no_grad():
snake_case__ : List[Any] = model(lowerCamelCase )
snake_case__ : Dict = outputs.logits.detach().cpu()
snake_case__ : int = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase , target_sizes=[(500, 300)] )
snake_case__ : List[Any] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
snake_case__ : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
snake_case__ : Any = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
| 703
|
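The integration tests above reduce to a short inference recipe. A hedged sketch: it downloads the real ADE20k checkpoint, so it needs network access, and the input image is a synthetic stand-in for the COCO fixture:

import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

processor = SegformerImageProcessor()
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
model.eval()

image = Image.new("RGB", (640, 480))  # synthetic stand-in image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# logits come out at 1/4 of the processed resolution; post-processing upsamples
segmentation = processor.post_process_semantic_segmentation(
    outputs=outputs, target_sizes=[image.size[::-1]]
)
print(outputs.logits.shape)   # torch.Size([1, 150, 128, 128]) with the default 512x512 resize
print(segmentation[0].shape)  # torch.Size([480, 640]), back at the original size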
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class _A ( UpperCamelCase ):
'''simple docstring'''
_lowercase = 42
_lowercase = 42
class _A ( nn.Module ):
'''simple docstring'''
_lowercase = 42
_lowercase = (16, 32, 96, 256)
_lowercase = jnp.floataa
def __lowerCAmelCase ( self : Tuple )-> Any:
snake_case__ : Union[str, Any] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
snake_case__ : List[str] = []
for i in range(len(self.block_out_channels ) - 1 ):
snake_case__ : List[str] = self.block_out_channels[i]
snake_case__ : Union[str, Any] = self.block_out_channels[i + 1]
snake_case__ : Optional[int] = nn.Conv(
lowerCamelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(lowerCamelCase )
snake_case__ : int = nn.Conv(
lowerCamelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(lowerCamelCase )
snake_case__ : Any = blocks
snake_case__ : Union[str, Any] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Union[str, Any] , lowerCamelCase : Any )-> Tuple:
snake_case__ : int = self.conv_in(lowerCamelCase )
snake_case__ : Dict = nn.silu(lowerCamelCase )
for block in self.blocks:
snake_case__ : Dict = block(lowerCamelCase )
snake_case__ : str = nn.silu(lowerCamelCase )
snake_case__ : Union[str, Any] = self.conv_out(lowerCamelCase )
return embedding
@flax_register_to_config
class _A ( nn.Module , UpperCamelCase , UpperCamelCase ):
'''simple docstring'''
_lowercase = 32
_lowercase = 4
_lowercase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_lowercase = False
_lowercase = (320, 640, 1280, 1280)
_lowercase = 2
_lowercase = 8
_lowercase = None
_lowercase = 1280
_lowercase = 0.0
_lowercase = False
_lowercase = jnp.floataa
_lowercase = True
_lowercase = 0
_lowercase = "rgb"
_lowercase = (16, 32, 96, 256)
def __lowerCAmelCase ( self : Optional[int] , lowerCamelCase : jax.random.KeyArray )-> FrozenDict:
# init input tensors
snake_case__ : Union[str, Any] = (1, self.in_channels, self.sample_size, self.sample_size)
snake_case__ : Dict = jnp.zeros(lowerCamelCase , dtype=jnp.floataa )
snake_case__ : str = jnp.ones((1,) , dtype=jnp.intaa )
snake_case__ : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
snake_case__ : Dict = (1, 3, self.sample_size * 8, self.sample_size * 8)
snake_case__ : Optional[Any] = jnp.zeros(lowerCamelCase , dtype=jnp.floataa )
snake_case__ , snake_case__ : List[Any] = jax.random.split(lowerCamelCase )
snake_case__ : Optional[Any] = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )["params"]
def __lowerCAmelCase ( self : int )-> int:
snake_case__ : List[Any] = self.block_out_channels
snake_case__ : Union[str, Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
snake_case__ : Any = self.num_attention_heads or self.attention_head_dim
# input
snake_case__ : Any = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
snake_case__ : List[str] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
snake_case__ : Union[str, Any] = FlaxTimestepEmbedding(lowerCamelCase , dtype=self.dtype )
snake_case__ : Tuple = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
snake_case__ : Union[str, Any] = self.only_cross_attention
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case__ : Dict = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case__ : Union[str, Any] = (num_attention_heads,) * len(self.down_block_types )
# down
snake_case__ : Optional[Any] = []
snake_case__ : str = []
snake_case__ : Tuple = block_out_channels[0]
snake_case__ : Tuple = nn.Conv(
lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCamelCase )
for i, down_block_type in enumerate(self.down_block_types ):
snake_case__ : Optional[int] = output_channel
snake_case__ : int = block_out_channels[i]
snake_case__ : Union[str, Any] = i == len(lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
snake_case__ : int = FlaxCrossAttnDownBlockaD(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
snake_case__ : Any = FlaxDownBlockaD(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowerCamelCase )
for _ in range(self.layers_per_block ):
snake_case__ : str = nn.Conv(
lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCamelCase )
if not is_final_block:
snake_case__ : Tuple = nn.Conv(
lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCamelCase )
snake_case__ : Optional[Any] = down_blocks
snake_case__ : List[str] = controlnet_down_blocks
# mid
snake_case__ : Union[str, Any] = block_out_channels[-1]
snake_case__ : Optional[int] = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowerCamelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
snake_case__ : Tuple = nn.Conv(
lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Optional[int] , lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : float = 1.0 , lowerCamelCase : bool = True , lowerCamelCase : bool = False , )-> Union[FlaxControlNetOutput, Tuple]:
snake_case__ : int = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
snake_case__ : Union[str, Any] = jnp.flip(lowerCamelCase , axis=1 )
# 1. time
if not isinstance(lowerCamelCase , jnp.ndarray ):
snake_case__ : List[Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
snake_case__ : Optional[int] = timesteps.astype(dtype=jnp.floataa )
snake_case__ : Optional[int] = jnp.expand_dims(lowerCamelCase , 0 )
snake_case__ : Any = self.time_proj(lowerCamelCase )
snake_case__ : List[Any] = self.time_embedding(lowerCamelCase )
# 2. pre-process
snake_case__ : Dict = jnp.transpose(lowerCamelCase , (0, 2, 3, 1) )
snake_case__ : Any = self.conv_in(lowerCamelCase )
snake_case__ : Dict = jnp.transpose(lowerCamelCase , (0, 2, 3, 1) )
snake_case__ : str = self.controlnet_cond_embedding(lowerCamelCase )
sample += controlnet_cond
# 3. down
snake_case__ : Any = (sample,)
for down_block in self.down_blocks:
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case__ , snake_case__ : List[str] = down_block(lowerCamelCase , lowerCamelCase , lowerCamelCase , deterministic=not train )
else:
snake_case__ , snake_case__ : List[str] = down_block(lowerCamelCase , lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
snake_case__ : Optional[Any] = self.mid_block(lowerCamelCase , lowerCamelCase , lowerCamelCase , deterministic=not train )
# 5. controlnet blocks
snake_case__ : List[str] = ()
for down_block_res_sample, controlnet_block in zip(lowerCamelCase , self.controlnet_down_blocks ):
snake_case__ : List[str] = controlnet_block(lowerCamelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
snake_case__ : Optional[Any] = controlnet_down_block_res_samples
snake_case__ : Optional[int] = self.controlnet_mid_block(lowerCamelCase )
# 6. scaling
snake_case__ : Optional[Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowerCamelCase , mid_block_res_sample=lowerCamelCase )
| 172
| 0
|
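The __call__ above starts by coercing timesteps into a batched array before the sinusoidal time projection. A minimal sketch of just that shape handling:

import jax.numpy as jnp

def prepare_timesteps(timesteps):
    # scalars become a 1-element batch; 0-d arrays get a batch axis
    if not isinstance(timesteps, jnp.ndarray):
        timesteps = jnp.array([timesteps], dtype=jnp.int32)
    elif len(timesteps.shape) == 0:
        timesteps = jnp.expand_dims(timesteps.astype(jnp.float32), 0)
    return timesteps

print(prepare_timesteps(7))             # [7]
print(prepare_timesteps(jnp.array(7)))  # [7.]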