import importlib
import os
import sys
# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Return the dotted module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all model tester classes in a model test file, i.e. classes whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file with a non-empty `all_model_classes` attribute."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in the `all_model_classes` attributes in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to their model tester classes."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_test_mapping(test_file):
    """Get a mapping from model classes to the test classes that cover them."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to their model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the mapping information succinct and easy to read by converting classes to their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
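

# Usage sketch (illustrative addition, not part of the upstream utility): run from
# the root of a `transformers` checkout; the test-file path below is an example.
if __name__ == "__main__":
    test_to_tester = get_test_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
    print(to_json(test_to_tester))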
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}
class DPRConfig(PretrainedConfig):
    """Configuration class to store the configuration of the DPR encoder and reader models."""

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
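

# Usage sketch (illustrative, not part of this configuration file): once exposed by
# the library, the config can be customized at construction time; the 128-dim
# projection below is an arbitrary example value.
#
#     from transformers import DPRConfig, DPRContextEncoder
#
#     config = DPRConfig(projection_dim=128)
#     model = DPRContextEncoder(config)  # randomly initialized context encoder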
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )


@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5

        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
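

# Standalone sketch (illustrative addition, not part of the upstream test suite):
# the same warpers can be chained outside of a test case via `FlaxLogitsProcessorList`.
if __name__ == "__main__" and is_flax_available():
    processor = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(3)]
    )
    dummy_ids = jnp.zeros((1, 4), dtype="i4")
    uniform_scores = jnp.ones((1, 10)) / 10
    print(processor(dummy_ids, uniform_scores, cur_len=4))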
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 12
        width = 12

        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n, computed with a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]  # 0 = still assumed prime, 1 = composite
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # cross out every multiple of the prime i, starting at i * i
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
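    # Quick sanity check (illustrative addition): the primes below 10 are 2, 3, 5 and 7.
    assert solution(10) == 2 + 3 + 5 + 7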
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    """A grid cell with A* bookkeeping: position, path cost g, heuristic h and total cost f."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start, goal) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the valid successor nodes: inside the grid and not an obstacle."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node) -> list[TPosition]:
        """Retrace the path from the given node back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start, goal) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each direction aims at the frontier node of the opposite direction
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bidir_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST


class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
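# Typical invocations (illustrative; they assume this script is saved as
# gradient_accumulation.py and, for the second command, that `accelerate config`
# has been run on the machine):
#
#     python gradient_accumulation.py --gradient_accumulation_steps 2 --cpu
#     accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2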
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
    BertTokenizer,
    BlipConfig,
    BlipForConditionalGeneration,
    BlipForImageTextRetrieval,
    BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image


def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key


@torch.no_grad()
def convert_blip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak the original BLIP weights to the transformers design. Note that `checkpoint_path`
    is accepted for CLI compatibility but unused: the original checkpoint URLs below are hardcoded.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the original BLIP checkpoint (currently unused).")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
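
# Example invocation (illustrative; assumes the script is saved as
# convert_blip_original_pytorch_to_hf.py and that the BLIP repo is on PYTHONPATH):
#
#     python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-base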
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __magic_name__( unittest.TestCase ):
def __init__( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int]=7 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Union[str, Any]=1_8 , __UpperCamelCase : Union[str, Any]=3_0 , __UpperCamelCase : str=4_0_0 , __UpperCamelCase : List[str]=True , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[int]=True , ):
'''simple docstring'''
snake_case__ = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = num_channels
snake_case__ = image_size
snake_case__ = min_resolution
snake_case__ = max_resolution
snake_case__ = do_resize
snake_case__ = size
snake_case__ = apply_ocr
def __lowerCAmelCase( self : Any ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __magic_name__( __lowerCAmelCase , unittest.TestCase ):
UpperCAmelCase_ : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __lowerCAmelCase( self : Union[str, Any] ):
'''simple docstring'''
snake_case__ = LayoutLMvaImageProcessingTester(self )
@property
def __lowerCAmelCase( self : Any ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase( self : Dict ):
'''simple docstring'''
snake_case__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """apply_ocr""" ) )
def __lowerCAmelCase( self : Any ):
'''simple docstring'''
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __lowerCAmelCase( self : int ):
'''simple docstring'''
pass
def __lowerCAmelCase( self : List[str] ):
'''simple docstring'''
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __UpperCamelCase )
self.assertIsInstance(encoding.boxes , __UpperCamelCase )
# Test batched
snake_case__ = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        expected_boxes = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], 
[4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
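

# A minimal usage sketch of the processor exercised above (the image path is
# illustrative; running it requires torch, pytesseract and a real image on disk):
#
#     processor = LayoutLMv3ImageProcessor(apply_ocr=True)
#     enc = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
#     # enc.pixel_values has shape (1, 3, 224, 224); enc.words / enc.boxes hold the OCR output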
"""Informer model configuration"""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
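

# A quick sanity-check sketch of how `feature_size` above is derived (values
# assume the defaults; this is illustrative, not part of the library):
#
#     config = InformerConfig(prediction_length=24, num_time_features=2)
#     # lags_sequence defaults to [1..7], cardinality to [0], embedding_dimension to [0],
#     # so _number_of_features = 0 + 0 + 2 + 0 + 1 * 2 = 4
#     # and feature_size = 1 * 7 + 4 = 11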
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
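

# For reference, the tag this script scrapes typically looks like the following
# (hypothetical page source):
#   <meta property="og:image" content="https://example.com/photo.jpg" />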
"""Merge LoRA weights stored in a .safetensors file into a Stable Diffusion pipeline in diffusers format."""
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
lowerCAmelCase: Tuple = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
lowerCAmelCase: List[str] = parser.parse_args()
lowerCAmelCase: List[str] = args.base_model_path
lowerCAmelCase: List[str] = args.checkpoint_path
lowerCAmelCase: List[str] = args.dump_path
lowerCAmelCase: List[Any] = args.lora_prefix_unet
lowerCAmelCase: Optional[int] = args.lora_prefix_text_encoder
lowerCAmelCase: Optional[Any] = args.alpha
lowerCAmelCase: Optional[Any] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
lowerCAmelCase: Union[str, Any] = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
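

# Example invocation (the script file name and the local paths are hypothetical):
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora_weights.safetensors \
#       --dump_path ./merged_pipeline \
#       --alpha 0.75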
from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
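

# What the DummyObject metaclass buys us, sketched: importing the class always
# succeeds, but touching it without the backends installed raises, e.g.:
#
#     from diffusers.utils.dummy_torch_and_torchsde_objects import DPMSolverSDEScheduler
#     DPMSolverSDEScheduler()  # raises unless both torch and torchsde are available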
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
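

# Worked example of the sliding window above (a sketch; the output dtype is float64):
#   >>> maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
#   array([[ 6.,  8.],
#          [14., 16.]])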
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
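

# Same input as the maxpooling example; each window average is truncated by int():
#   >>> avgpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
#   array([[ 3.,  5.],
#          [11., 13.]])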
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrites the file content when `overwrite=True`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
        eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
        mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
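

# Padding semantics of `_pad` above, sketched with hypothetical ids: padding
# {"input_ids": [0, 9, 10, 2], "global_attention_mask": [1, 0, 0, 0]} to length 6
# with padding_side="right" yields global_attention_mask [1, 0, 0, 0, -1, -1],
# where 1 = global attention, 0 = local attention and -1 marks padded positions.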
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
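

# Net effect of the lazy module, sketched: a plain `import transformers.models.layoutlmv2`
# only registers `_import_structure`; heavy submodules such as modeling_layoutlmv2 are
# imported on first attribute access, while the `if TYPE_CHECKING:` branch keeps static
# type checkers aware of the real symbols.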
from manim import *


class Stage2(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
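

# To render this scene, something like the following manim CLI call should work
# (the file name is hypothetical; `Stage2` is the scene class defined above):
#   manim -pql stage_2.py Stage2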
import inspect
import unittest


class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
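

# The naming convention this test relies on, sketched: a dummy class such as
# diffusers.utils.dummy_torch_and_torchsde_objects.DPMSolverSDEScheduler carries
# _backends = ["torch", "torchsde"], and each entry (after the renames above)
# must be a key of diffusers.dependency_versions_table.deps.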
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
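

# Example output (the listings are hypothetical):
#   Job  1 is Android Developer at ExampleSoft Pvt Ltd
#   Job  2 is Flutter Engineer at AppWorks India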
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase_ = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
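# Worked example (a sketch under the definitions above, not part of the original
# test file): with pad_token_id=1, input_ids=[[4, 5, 1]] yields
# attention_mask=[[1, 1, 0]], while decoder_input_ids=[[1, 6, 1]] yields
# decoder_attention_mask=[[1, 1, 0]] -- the first decoder position is always
# kept visible, and the remaining pad tokens are masked out.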
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California\'s largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
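# Hedged usage sketch (not part of the original configuration file): with the
# defaults above, each attention head covers 4544 // 71 == 64 dimensions, and
# rotary embeddings are enabled because `alibi` defaults to False.
if __name__ == "__main__":
    config = FalconConfig()
    print(config.head_dim)  # 64
    print(config.rotary)  # True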
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=["About 95 species are currently accepted ."]\n    >>> predictions=["About 95 you now get in ."]\n    >>> references=[["About 95 species are currently known ."]]\n    >>> wiki_split = datasets.load_metric("wiki_split")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))
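# Worked example (illustration only, not in the original module): lowercasing
# runs first, then punctuation is stripped, articles become spaces, and the
# remaining whitespace is collapsed, so
#     normalize_answer("The  Quick, Brown fox!")  ->  "quick brown fox"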
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
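# Worked example (illustration only): a prediction counts as an exact match if
# it normalizes to the same string as at least one of its references, so
#     compute_em(["An apple."], [["apple", "pear"]])  ->  100.0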
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
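# Tiny worked example (illustration, not in the original module): with a single
# reference, SARIngram(["a", "b"], ["a", "c"], [["a", "c"]], 1) returns
# (1.0, 1.0, 1.0) -- "a" is correctly kept, "b" correctly deleted, and "c"
# correctly added, so the keep/delete/add component scores are all perfect.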
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
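# Illustration (exact output depends on the installed sacrebleu version): the
# default "13a" tokenizer mainly lowercases and splits punctuation off into
# its own tokens, so normalize("About 95 species are currently accepted.")
# comes back with the trailing period separated by a space.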
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")

    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
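# Hedged end-to-end sketch (mirrors the example in _KWARGS_DESCRIPTION above;
# requires the `datasets` metric loader):
if __name__ == "__main__":
    sources = ["About 95 species are currently accepted ."]
    predictions = ["About 95 you now get in ."]
    references = [["About 95 species are currently known ."]]
    wiki_split = datasets.load_metric("wiki_split")
    print(wiki_split.compute(sources=sources, predictions=predictions, references=references))
    # -> {'sari': 21.805..., 'sacrebleu': 14.535..., 'exact': 0.0}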
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
"""simple docstring"""
from collections.abc import Sequence
def UpperCAmelCase__ ( lowerCAmelCase__ :Sequence[float] , lowerCAmelCase__ :float ) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(lowerCAmelCase__ ) )
def UpperCAmelCase__ ( lowerCAmelCase__ :Sequence[float] , lowerCAmelCase__ :float ) -> float:
'''simple docstring'''
lowercase = 0.0
for coeff in reversed(lowerCAmelCase__ ):
lowercase = result * x + coeff
return result
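# Worked arithmetic check (not in the original file): for
# poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0, Horner's rule computes
#     (((7.0 * 10 + 9.3) * 10 + 5.0) * 10 + 0.0) * 10 + 0.0
#   = (79.3 * 10 + 5.0) * 100 = 798.0 * 100 = 79800.0,
# matching the term-by-term sum 5.0*10**2 + 9.3*10**3 + 7.0*10**4.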
if __name__ == "__main__":
__lowerCAmelCase : Tuple =(0.0, 0.0, 5.0, 9.3, 7.0)
__lowerCAmelCase : Tuple =10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
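# Worked example (illustration only): a grayscale pixel value of 200 maps to
# 200 / 255 ~= 0.78 and snaps to 1, while a value of 20 maps to ~0.08 and
# snaps to 0, so the mask is strictly binary before it reaches the scheduler
# (in RePaint, 1 marks pixels to keep and 0 marks the region to inpaint).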
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image,
        mask_image,
        num_inference_steps=250,
        eta=0.0,
        jump_length=10,
        jump_n_sample=10,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.'
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
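# Hedged usage sketch (not part of the pipeline module; the checkpoint name is
# taken from the diffusers RePaint docs and is an assumption here):
#
#     from diffusers import RePaintPipeline, RePaintScheduler
#
#     scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#     output = pipe(
#         image=original_image,   # PIL image to restore
#         mask_image=mask_image,  # 1 = keep, 0 = inpaint
#         num_inference_steps=250,
#         eta=0.0,
#         jump_length=10,
#         jump_n_sample=10,
#     )
#     inpainted = output.images[0]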
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}


class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        return list(lyrics)
    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(
        self, artists, genres, lyrics, is_split_into_words=False
    ) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text
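    # Worked example (illustration only): every character outside [a-z0-9.] is
    # mapped to "_" after lowercasing, runs of "_" collapse, and edges are
    # stripped, so self._normalize("Alan Jackson!") -> "alan_jackson".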
    def convert_lyric_tokens_to_string(self, lyrics):
        return " ".join(lyrics)

    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]

            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )

        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between two prompts, returning it with its line indices."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
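# Worked example (illustration only):
#     camel_case_split("TFDistilBertModel")  ->  ["TF", "Distil", "Bert", "Model"]
# The lookarounds split before an upper-case letter that follows a lower-case
# one, and inside an acronym right before its last capital.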
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def _A( UpperCamelCase__ : Optional[Any]=False ) -> List[str]:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = _find_text_in_file(
filename=os.path.join(UpperCamelCase__ , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
__lowercase = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(UpperCamelCase__ , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
UpperCAmelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
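# Usage sketch (assumed repo layout, mirroring the other consistency scripts):
#
#     python utils/check_table.py                      # error out if index.md is stale
#     python utils/check_table.py --fix_and_overwrite  # regenerate the table in place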
| 332
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """A binary tree node holding a float value."""

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    """Return True if the tree rooted at `root` is a valid binary search tree."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be of type TreeNode and its data should be a float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
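    # A minimal check with constructed values (assumes the TreeNode dataclass above):
    # (2.0 (1.0) (3.0)) is a valid BST; its mirror is not.
    assert is_binary_search_tree(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0)))
    assert not is_binary_search_tree(TreeNode(2.0, TreeNode(3.0), TreeNode(1.0)))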
| 332
| 1
|
'''simple docstring'''
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """Extracts text nodes and corresponding xpaths from raw HTML strings (requires the `bs4` backend)."""

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings have a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str` or `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
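# A short usage sketch (hypothetical HTML; assumes `bs4` is installed):
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#     encoding["nodes"]   -> [['Hello world']]
#     encoding["xpaths"]  -> [['/html/body/p']]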
| 394
|
'''simple docstring'''
class TrieNode:
    def __init__(self):
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str):
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str):
    """Print every word stored below `node`, prefixed by `word`."""
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie():
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool):
    print(str(msg), "works!" if passes else "doesn't work :(")


def test():
    assert test_trie()


def main():
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
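# A quick interactive sketch (assumes the TrieNode class above):
#
#     root = TrieNode()
#     root.insert_many(["cat", "car"])
#     root.find("car")   -> True
#     root.delete("car")
#     root.find("car")   -> False; root.find("cat") is still True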
| 394
| 1
|
import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which would make the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
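# pytest picks this conftest.py up automatically; the `--make-reports` option used above
# is registered by `pytest_addoption_shared`. A typical CI-style invocation (illustrative
# paths and report id):
#
#     python -m pytest --make-reports=tests_torch tests/models/bert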
| 198
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Solve the ideal gas law PV = nRT for the pressure P."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Solve the ideal gas law PV = nRT for the volume V."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
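    # Worked example (assumed values): 1 mol at 300 K in a 1 m^3 vessel gives
    # P = nRT / V = 1 * 8.314462 * 300 / 1 ≈ 2494.34 Pa.
    print(pressure_of_gas_system(1, 300, 1))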
| 341
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706
|
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
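# A usage sketch (hypothetical value function, UNet, scheduler and D4RL-style env;
# assumes the pipeline class above):
#
#     pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
#     action = pipeline(obs, planning_horizon=32)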
| 161
| 0
|
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # Kept for backward compatibility: importing from this module only raises a warning.
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 5
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 317
| 0
|
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of `nums` (0 for an empty list)."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
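    # e.g. for [1, 5, 3, 7, 2, 2, 6] the best non-adjacent choice is 5 + 7 + 6 = 18.
    print(maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]))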
| 711
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=32 , __A=2 , __A=4 , __A=37 , __A=0.1 , __A=0.1 , __A=40 , __A=2 , __A=1 , __A=0 , ) -> int:
lowerCAmelCase_ :List[Any] = parent
lowerCAmelCase_ :List[Any] = batch_size
lowerCAmelCase_ :Union[str, Any] = seq_length
lowerCAmelCase_ :List[str] = is_training
lowerCAmelCase_ :Tuple = use_labels
lowerCAmelCase_ :str = vocab_size
lowerCAmelCase_ :Optional[int] = hidden_size
lowerCAmelCase_ :Optional[int] = num_hidden_layers
lowerCAmelCase_ :Optional[int] = num_attention_heads
lowerCAmelCase_ :Any = intermediate_size
lowerCAmelCase_ :Dict = hidden_dropout_prob
lowerCAmelCase_ :Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase_ :List[Any] = max_position_embeddings
lowerCAmelCase_ :Union[str, Any] = eos_token_id
lowerCAmelCase_ :str = pad_token_id
lowerCAmelCase_ :str = bos_token_id
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase_ :str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase_ :Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ :List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCAmelCase_ :str = prepare_pegasus_inputs_dict(__A , __A , __A )
return config, inputs_dict
def __lowerCAmelCase ( self , __A , __A ) -> Tuple:
lowerCAmelCase_ :str = TFPegasusModel(config=__A ).get_decoder()
lowerCAmelCase_ :str = inputs_dict["""input_ids"""]
lowerCAmelCase_ :Optional[int] = input_ids[:1, :]
lowerCAmelCase_ :Dict = inputs_dict["""attention_mask"""][:1, :]
lowerCAmelCase_ :Union[str, Any] = inputs_dict["""head_mask"""]
lowerCAmelCase_ :int = 1
# first forward pass
lowerCAmelCase_ :List[Any] = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase_ :Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ :Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCAmelCase_ :str = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase_ :Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase_ :List[Any] = model(__A , attention_mask=__A )[0]
lowerCAmelCase_ :Optional[int] = model(__A , attention_mask=__A , past_key_values=__A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase_ :Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase_ :Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase_ :Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__A , __A , rtol=1E-3 )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :str = TFPegasusModelTester(self )
lowerCAmelCase_ :Union[str, Any] = ConfigTester(self , config_class=__A )
def __lowerCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__A )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
@cached_property
def __lowerCAmelCase ( self ) -> int:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __lowerCAmelCase ( self , **__A ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = self.translate_src_text(**__A )
assert self.expected_text == generated_words
def __lowerCAmelCase ( self , **__A ) -> str:
lowerCAmelCase_ :str = self.tokenizer(self.src_text , **__A , padding=__A , return_tensors="""tf""" )
lowerCAmelCase_ :int = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__A , )
lowerCAmelCase_ :int = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__A )
return generated_words
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
self._assert_generated_batch_equal_expected()
| 256
| 0
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the sum of 2 * a * ((a - 1) // 2) for a from 3 to n."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
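    # Worked check for small n: a=3 gives 6, a=4 gives 8, a=5 gives 20, so solution(5) == 34.
    assert solution(5) == 34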
| 207
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 391
| 0
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 716
|
# Install cell injected at the top of the Korean ("ko") doc notebooks.
INSTALL_CONTENT = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 29
| 0
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    """Relax the edges leaving `v` for one direction of the bidirectional search."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """Run Dijkstra simultaneously from `source` and `destination`; return -1 if unreachable."""
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
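    # On the module-level graphs above, the cheapest E -> F route is E -> G -> F,
    # so the call below prints 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))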
| 9
|
"""simple docstring"""
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        """Print the adjacency list."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        """Add a directed edge from `from_vertex` to `to_vertex`."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        """Visit every vertex with a depth-first traversal."""
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 535
| 0
|
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """A second-order (KDPM2) discrete scheduler with interpolated sigmas."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
def __init__( self , A__ = 10_00 , A__ = 0.0_0_0_8_5 , A__ = 0.0_1_2 , A__ = "linear" , A__ = None , A__ = "epsilon" , A__ = "linspace" , A__ = 0 , ) -> List[Any]:
if trained_betas is not None:
snake_case = torch.tensor(A__ , dtype=torch.floataa )
elif beta_schedule == "linear":
snake_case = torch.linspace(A__ , A__ , A__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , A__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case = betas_for_alpha_bar(A__ )
else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
snake_case = 1.0 - self.betas
snake_case = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(A__ , A__ , A__ )
def UpperCamelCase ( self , A__ , A__=None ) -> List[str]:
if schedule_timesteps is None:
snake_case = self.timesteps
snake_case = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
snake_case = 1 if len(A__ ) > 1 else 0
else:
snake_case = timestep.cpu().item() if torch.is_tensor(A__ ) else timestep
snake_case = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase ( self ) -> List[str]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase ( self , A__ , A__ , ) -> torch.FloatTensor:
snake_case = self.index_for_timestep(A__ )
if self.state_in_first_order:
snake_case = self.sigmas[step_index]
else:
snake_case = self.sigmas_interpol[step_index]
snake_case = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase ( self , A__ , A__ = None , A__ = None , ) -> Any:
snake_case = num_inference_steps
snake_case = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
snake_case = np.linspace(0 , num_train_timesteps - 1 , A__ , dtype=A__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
snake_case = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
snake_case = (np.arange(0 , A__ ) * step_ratio).round()[::-1].copy().astype(A__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
snake_case = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
snake_case = (np.arange(A__ , 0 , -step_ratio )).round().copy().astype(A__ )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
snake_case = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
snake_case = torch.from_numpy(np.log(A__ ) ).to(A__ )
snake_case = np.interp(A__ , np.arange(0 , len(A__ ) ) , A__ )
snake_case = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
snake_case = torch.from_numpy(A__ ).to(device=A__ )
# interpolate sigmas
snake_case = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
snake_case = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
snake_case = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(A__ ).startswith('''mps''' ):
# mps does not support float64
snake_case = torch.from_numpy(A__ ).to(A__ , dtype=torch.floataa )
else:
snake_case = torch.from_numpy(A__ ).to(A__ )
# interpolate timesteps
snake_case = self.sigma_to_t(A__ ).to(A__ , dtype=timesteps.dtype )
snake_case = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
snake_case = torch.cat([timesteps[:1], interleaved_timesteps] )
snake_case = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
snake_case = defaultdict(A__ )
def UpperCamelCase ( self , A__ ) -> int:
# get log sigma
snake_case = sigma.log()
# get distribution
snake_case = log_sigma - self.log_sigmas[:, None]
# get sigmas range
snake_case = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
snake_case = low_idx + 1
snake_case = self.log_sigmas[low_idx]
snake_case = self.log_sigmas[high_idx]
# interpolate sigmas
snake_case = (low - log_sigma) / (low - high)
snake_case = w.clamp(0 , 1 )
# transform interpolation to time range
snake_case = (1 - w) * low_idx + w * high_idx
snake_case = t.view(sigma.shape )
return t
@property
def UpperCamelCase ( self ) -> List[str]:
return self.sample is None
def UpperCamelCase ( self , A__ , A__ , A__ , A__ = True , ) -> Union[SchedulerOutput, Tuple]:
snake_case = self.index_for_timestep(A__ )
# advance index counter by 1
snake_case = timestep.cpu().item() if torch.is_tensor(A__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
snake_case = self.sigmas[step_index]
snake_case = self.sigmas_interpol[step_index + 1]
snake_case = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
snake_case = self.sigmas[step_index - 1]
snake_case = self.sigmas_interpol[step_index]
snake_case = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
snake_case = 0
snake_case = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
snake_case = sigma_hat if self.state_in_first_order else sigma_interpol
snake_case = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
snake_case = sigma_hat if self.state_in_first_order else sigma_interpol
snake_case = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
snake_case = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
snake_case = sigma_interpol - sigma_hat
# store for 2nd order step
snake_case = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
snake_case = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
snake_case = sigma_next - sigma_hat
snake_case = self.sample
snake_case = None
snake_case = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A__ )
def UpperCamelCase ( self , A__ , A__ , A__ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
snake_case = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(A__ ):
# mps does not support float64
snake_case = self.timesteps.to(original_samples.device , dtype=torch.floataa )
snake_case = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
snake_case = self.timesteps.to(original_samples.device )
snake_case = timesteps.to(original_samples.device )
snake_case = [self.index_for_timestep(A__ , A__ ) for t in timesteps]
snake_case = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
snake_case = sigma.unsqueeze(-1 )
snake_case = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> Tuple:
return self.config.num_train_timesteps
| 44
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
def __init__( self , A__ , A__=13 , A__=30 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.0_2 , A__=3 , A__=None , ) -> List[Any]:
snake_case = parent
snake_case = batch_size
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = is_training
snake_case = use_labels
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case = (image_size // patch_size) ** 2
snake_case = num_patches + 1
def UpperCamelCase ( self ) -> int:
snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ) -> int:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
snake_case = TFViTModel(config=A__ )
snake_case = model(A__ , training=A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
snake_case = self.image_size // 2
snake_case = pixel_values[:, :, :image_size, :image_size]
snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ )
snake_case = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[int]:
snake_case = self.type_sequence_label_size
snake_case = TFViTForImageClassification(A__ )
snake_case = model(A__ , labels=A__ , training=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
snake_case = self.image_size // 2
snake_case = pixel_values[:, :, :image_size, :image_size]
snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case = 1
snake_case = TFViTForImageClassification(A__ )
snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case = config_and_inputs
snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def UpperCamelCase ( self ) -> List[Any]:
snake_case = TFViTModelTester(self )
snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
def UpperCamelCase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCamelCase ( self ) -> int:
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCamelCase ( self ) -> str:
pass
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(A__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) )
def UpperCamelCase ( self ) -> List[Any]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(A__ )
snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCamelCase ( self ) -> Optional[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__ )
@slow
def UpperCamelCase ( self ) -> Any:
snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(A__ )
def __UpperCamelCase ( ) ->Any:
snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
@cached_property
def UpperCamelCase ( self ) -> Optional[int]:
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def UpperCamelCase ( self ) -> Dict:
snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
snake_case = self.default_image_processor
snake_case = prepare_img()
snake_case = image_processor(images=A__ , return_tensors='''tf''' )
# forward pass
snake_case = model(**A__ )
# verify the logits
snake_case = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , A__ )
snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
| 44
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__A : int = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__A : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 343
|
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
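# A brute-force cross-check for the sliding-window `solution` above; a sketch
# added for verification, not part of the original solution. It simply scores
# every window of thirteen digits.
def solution_bruteforce(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))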
if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class _A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Dict):
a : Tuple = 0
a : Any = [0]
a : List[Any] = [0]
a : List[Any] = len(__UpperCAmelCase)
self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0)
a : List[str] = [60]
a : Dict = [10]
a : Optional[int] = len(__UpperCAmelCase)
self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0)
def __snake_case ( self : Optional[Any]):
a : Union[str, Any] = 3
a : Any = [1, 2, 3]
a : List[Any] = [3, 2, 1]
a : Optional[Any] = len(__UpperCAmelCase)
self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5)
def __snake_case ( self : str):
a : int = 50
a : Dict = [60, 100, 120]
a : Tuple = [10, 20, 30]
a : Optional[Any] = len(__UpperCAmelCase)
self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220)
if __name__ == "__main__":
unittest.main()
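# The tests above import `knapsack.knapsack`, which is not included in this
# dump. A minimal recursive 0/1-knapsack sketch with the assumed signature
# knapsack(capacity, weights, values, counter) that satisfies the tests:
#
#   def knapsack(capacity, weights, values, counter):
#       # Base case: no items left or no remaining capacity.
#       if counter == 0 or capacity == 0:
#           return 0
#       # If the current item is too heavy, skip it.
#       if weights[counter - 1] > capacity:
#           return knapsack(capacity, weights, values, counter - 1)
#       # Otherwise take the better of excluding or including the item.
#       return max(
#           knapsack(capacity, weights, values, counter - 1),
#           values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
#       )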
def multiplicative_persistence(num: int) -> int:
    """Return the number of steps needed to reach a single digit by repeatedly
    multiplying the digits of num."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return the number of steps needed to reach a single digit by repeatedly
    summing the digits of num."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
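# Quick sanity checks for the two functions above (worked examples added for
# illustration, not in the original file):
#   multiplicative_persistence(39) == 3   # 39 -> 27 -> 14 -> 4
#   additive_persistence(199) == 3        # 199 -> 19 -> 10 -> 1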
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change is required for a root to lie between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
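# Both example calls bracket the positive root of 10 - x**2, so each converges
# to roughly 3.162 (sqrt(10)) within the 0.01 tolerance used above.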
"""simple docstring"""
from __future__ import annotations
import queue
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : str , UpperCamelCase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = data
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Any = None
def lowerCamelCase ( ) -> TreeNode:
'''simple docstring'''
print("""\n********Press N to stop entering at any point of time********\n""" )
__UpperCAmelCase : Optional[int] = input("""Enter the value of the root node: """ ).strip().lower()
__UpperCAmelCase : queue.Queue = queue.Queue()
__UpperCAmelCase : int = TreeNode(int(_UpperCamelCase ) )
q.put(_UpperCamelCase )
while not q.empty():
__UpperCAmelCase : List[str] = q.get()
__UpperCAmelCase : List[str] = f'''Enter the left node of {node_found.data}: '''
__UpperCAmelCase : Tuple = input(_UpperCamelCase ).strip().lower() or """n"""
if check == "n":
return tree_node
__UpperCAmelCase : str = TreeNode(int(_UpperCamelCase ) )
__UpperCAmelCase : List[Any] = left_node
q.put(_UpperCamelCase )
__UpperCAmelCase : List[str] = f'''Enter the right node of {node_found.data}: '''
__UpperCAmelCase : Tuple = input(_UpperCamelCase ).strip().lower() or """n"""
if check == "n":
return tree_node
__UpperCAmelCase : List[str] = TreeNode(int(_UpperCamelCase ) )
__UpperCAmelCase : Tuple = right_node
q.put(_UpperCamelCase )
raise
def lowerCamelCase ( _UpperCamelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase ( _UpperCamelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def lowerCamelCase ( _UpperCamelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def lowerCamelCase ( _UpperCamelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not node:
return
__UpperCAmelCase : queue.Queue = queue.Queue()
q.put(_UpperCamelCase )
while not q.empty():
__UpperCAmelCase : str = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase ( _UpperCamelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not node:
return
__UpperCAmelCase : queue.Queue = queue.Queue()
q.put(_UpperCamelCase )
while not q.empty():
__UpperCAmelCase : Union[str, Any] = []
while not q.empty():
__UpperCAmelCase : Optional[int] = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_UpperCamelCase )
def lowerCamelCase ( _UpperCamelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not node:
return
__UpperCAmelCase : list[TreeNode] = []
__UpperCAmelCase : Optional[Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(_UpperCamelCase )
__UpperCAmelCase : Dict = n.left
# end of while means current node doesn't have left child
__UpperCAmelCase : List[str] = stack.pop()
# start to traverse its right child
__UpperCAmelCase : List[str] = n.right
def lowerCamelCase ( _UpperCamelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not node:
return
__UpperCAmelCase : list[TreeNode] = []
__UpperCAmelCase : Dict = node
while n or stack:
while n:
stack.append(_UpperCamelCase )
__UpperCAmelCase : Tuple = n.left
__UpperCAmelCase : Any = stack.pop()
print(n.data , end=""",""" )
__UpperCAmelCase : List[Any] = n.right
def lowerCamelCase ( _UpperCamelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not node:
return
__UpperCAmelCase ,__UpperCAmelCase : Optional[int] = [], []
__UpperCAmelCase : Optional[Any] = node
stacka.append(_UpperCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
__UpperCAmelCase : Tuple = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_UpperCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def lowerCamelCase ( _UpperCamelCase : str = "" , _UpperCamelCase : int=5_0 , _UpperCamelCase : Tuple="*" ) -> str:
'''simple docstring'''
if not s:
return "\n" + width * char
__UpperCAmelCase ,__UpperCAmelCase : Tuple = divmod(width - len(_UpperCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
UpperCAmelCase : TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
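# Worked example (added for illustration): for the tree
#         1
#        / \
#       2   3
#      / \
#     4   5
# pre_order prints 1,2,4,5,3,  in_order prints 4,2,5,1,3,
# post_order prints 4,5,2,3,1, and level_order prints 1,2,3,4,5,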
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))


class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real (active) power, P = S * pf, from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power, Q = S * sqrt(1 - pf**2), from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
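# Worked example (added for illustration): with apparent power S = 100 VA and
# power factor 0.9,
#   real_power(100, 0.9)     -> 90.0        (watts)
#   reactive_power(100, 0.9) -> 43.5889...  (volt-amperes reactive)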
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
def binary_insertion_sort(collection: list) -> list:
    """Sort a collection in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
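# Note (added for clarity): the binary search above only reduces the number of
# comparisons to O(log i) per element; the shifts are still O(i), so the
# overall worst-case running time remains O(n^2), like plain insertion sort.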
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_A = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
        localized_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
        link_unchanged_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
            ],
            dtype=object,
        )
        return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids

        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids

        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the original checkpoint's weights into our model structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
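# Example invocation (the script filename is hypothetical; the checkpoint repo
# comes from the --checkpoint-repo help text above):
#
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-dump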
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1_024,
"gpt2-medium": 1_024,
"gpt2-large": 1_024,
"gpt2-xl": 1_024,
"distilgpt2": 1_024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
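# A minimal usage sketch (the checkpoint name is the standard public "gpt2";
# the token ids shown are what that vocabulary should produce):
#
#   tok = GPT2TokenizerFast.from_pretrained("gpt2")
#   tok("Hello world")["input_ids"]  # -> [15496, 995]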
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all model tester classes in `test_file`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in `test_file`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of some test class in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to their model tester classes."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to the test classes they appear in."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to their model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and easy to read: strings stay as-is, classes become
    their names, and containers are converted recursively."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
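# A minimal usage sketch for the helpers above (the test-file path is
# illustrative):
#
#   mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
#   print(to_json(mapping))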
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        # Build streaming (iterable) dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
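

# Minimal usage sketch (illustrative; in practice this class is reached through
# `datasets.Dataset.from_generator` rather than instantiated directly):
#
#   def gen():
#       for i in range(3):
#           yield {"id": i}
#
#   ds = GeneratorDatasetInputStream(generator=gen).read()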
| 339
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
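

# Usage sketch (illustrative): the template maps a custom text column back to
# the canonical "text" name expected by the language-modeling task.
#
#   template = LanguageModeling(text_column="content")
#   template.column_mapping  # {"content": "text"}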
| 419
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 419
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 34
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
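

# Usage sketch (illustrative; the class name `MobileNetV2ImageProcessor` and the
# restored argument names above are assumptions based on the standard
# transformers image-processor interface and these defaults):
#
#   from PIL import Image
#   processor = MobileNetV2ImageProcessor()
#   batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224): resized, center-cropped, normalized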
| 237
| 0
|
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    # all eight knight moves from the current position
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    # the tour is complete when no cell is left at 0
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
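

# Quick check (illustrative): a 1x1 board is trivially solvable.
#
#   open_knight_tour(1)  # [[1]]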
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256, share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm", hidden_act="gelu_python", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-7, feature_layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
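

# Usage sketch (illustrative): the defaults mirror the asapp/sew-d-tiny-100k
# checkpoint referenced above.
#
#   config = SEWDConfig()
#   config.inputs_to_logits_ratio  # 320, the product of the conv strides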
| 492
| 0
|
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        'Expected a list of numbers as input, found '
                        f'{type(item).__name__}'
                    )
                    raise TypeError(msg)
        else:
            msg = f'Expected a list of numbers as input, found {type(point).__name__}'
            raise TypeError(msg)
    else:
        raise ValueError('Missing an input')


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
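

# Quick check (illustrative):
#
#   manhattan_distance([1, 1], [2, 2])            # 2.0
#   manhattan_distance_one_liner([1, 1], [2, 2])  # 2.0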
if __name__ == "__main__":
import doctest
doctest.testmod()
| 599
|
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(f'''{solution() = }''')
| 599
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = ["ViTFeatureExtractor"]
lowercase__ : str = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
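
    # Note (sketch): with the lazy module in place, importing the package is
    # cheap; backend-specific submodules load only on first attribute access:
    #
    #   from transformers.models.vit import ViTConfig  # no torch import triggered
    #   from transformers.models.vit import ViTModel   # pulls in the torch-backed module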
| 451
|
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 451
| 1
|
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' ,[
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] ,)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' ,'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' ,'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' ,'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' ,[
DatasetInfo(),
DatasetInfo(
description='foo' ,features=Features({'a': Value('int32' )} ) ,builder_name='builder' ,config_name='config' ,version='1.0.0' ,splits=[{'name': 'train'}] ,download_size=42 ,),
] ,)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, 'dataset_info.json'))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='foo', citation='bar', homepage='https://foo.bar', license='CC0', features=Features({'a': Value('int32')}), post_processed={}, supervised_keys=(), task_templates=[], builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train', 'num_examples': 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] ,(list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' ,[
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' ,features=Features({'a': Value('int32' )} ) ,builder_name='builder' ,config_name='config' ,version='1.0.0' ,splits=[{'name': 'train'}] ,download_size=42 ,)
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] ,)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, 'README.md'))
| 28
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group"):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
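

# Minimal round-trip sketch (illustrative; uses the default tiny configuration):
#
#   model = VQModel()
#   x = torch.randn(1, 3, 32, 32)
#   out = model(x).sample  # reconstruction with the same shape as x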
| 392
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def snake_case_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self , A__=0 ):
"""simple docstring"""
UpperCAmelCase_: List[Any] = torch.manual_seed(A__ )
UpperCAmelCase_: List[Any] = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
UpperCAmelCase_: Optional[int] = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
UpperCAmelCase_: Union[str, Any] = self.get_inputs()
UpperCAmelCase_: List[Any] = pipe(**A__ ).images
UpperCAmelCase_: str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_: Union[str, Any] = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=A__ )
UpperCAmelCase_: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
UpperCAmelCase_: Optional[int] = self.get_inputs()
UpperCAmelCase_: Optional[Any] = pipe(**A__ ).images
UpperCAmelCase_: Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_: Tuple = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=A__ )
UpperCAmelCase_: Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
UpperCAmelCase_: str = self.get_inputs()
UpperCAmelCase_: Union[str, Any] = pipe(**A__ ).images
UpperCAmelCase_: Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_: str = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: List[Any] = 0
def callback_fn(A__ , A__ , A__ ) -> None:
UpperCAmelCase_: List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCAmelCase_: List[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_: str = latents[0, -3:, -3:, -1]
UpperCAmelCase_: str = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
UpperCAmelCase_: Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_: Optional[Any] = latents[0, -3:, -3:, -1]
UpperCAmelCase_: Optional[Any] = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
UpperCAmelCase_: Tuple = False
UpperCAmelCase_: str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=A__ , torch_dtype=torch.floataa )
UpperCAmelCase_: Tuple = pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
UpperCAmelCase_: Tuple = self.get_inputs()
pipe(**A__ , callback=A__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504) )
        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        output = pipe(**inputs )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 306
|
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """For each element, scan everything to its right and take the first larger value; -1 if none exists. O(n^2)."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same brute-force idea written with enumerate and slicing; still O(n^2) but with less index bookkeeping."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
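# The variant below runs in O(n): scanning right to left, it pops every stack
# entry that is <= the current element, so the stack only ever holds values
# that are strictly greater than everything scanned after them.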
def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
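# Worked example: next_greatest_element([2, 7, 3, 5, 1]) == [7, -1, 5, -1, -1]
# (7 is the first larger value to the right of 2, 5 is the first to the right
# of 3, and 7, 5 and 1 have no larger value to their right).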
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
setup = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
| 306
| 1
|
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as dictionary of adjacency lists. Also,
        source vertex has to be defined upon initialization.
        """
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breath_first_search(self) -> None:
        """Run breadth first search from the source vertex, filling the parent map."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)
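    # shortest_path reconstructs a path by walking the parent map that
    # breath_first_search built, so breath_first_search() must run first.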
    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source vertex as a string of the form
        `v1(source_vertex)->v2->v3->...->vn(target_vertex)`, or raise ValueError
        if no path exists.
        """
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
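# With source 'G', g.shortest_path('D') returns 'G->C->A->B->D' and
# g.shortest_path('G') returns 'G'; the final call raises ValueError because
# 'Foo' is not present in the graph.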
if __name__ == "__main__":
    g = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 365
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
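# _import_structure maps each submodule to the public names it exports;
# _LazyModule (installed at the bottom of the file) defers the real imports
# until an attribute is first accessed.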
_import_structure = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
    _import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_maskformer'] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
    _import_structure['modeling_maskformer_swin'] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 365
| 1
|
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary: one token per line, the line number is its id."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer:
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first segmentation of `token` against the vocab."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
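# For example (with a hypothetical two-entry vocab),
# WordpieceTokenizer({"a": 0, "ab": 1}).tokenize("ab") returns ["ab"] rather
# than ["a", ...] because the longest match is tried first.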
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba word segmentation first, then wordpiece on each word."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and special tokens first."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an id to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        """Prepend the BOS token to each sequence."""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Mark special tokens with 1 and sequence tokens with 0."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 386
|
"""simple docstring"""
import math
def fx(x: float, a: float) -> float:
    """The objective whose positive root is sqrt(a): f(x) = x^2 - a."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """Derivative of f: f'(x) = 2x."""
    return 2 * x
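# Newton's method for f(x) = x**2 - a iterates x <- x - f(x)/f'(x); for this f
# the update simplifies to the Babylonian step x <- (x + a/x) / 2.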
def get_initial_point(a: float) -> float:
    """Repeatedly square 2.0 until it exceeds a, giving a starting point above sqrt(a)."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 1e-14) -> float:
    """Square root by Newton's method, stopping once successive iterates agree to within `tolerance`."""
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
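# For example, square_root_iterative(4.0) starts from get_initial_point(4.0) == 16.0
# and converges to 2.0 in a handful of iterations.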
if __name__ == "__main__":
from doctest import testmod
testmod()
| 386
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 589
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
def _lowerCamelCase ( self ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()
        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
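        # each masked token is reconstructed as raw pixels:
        # 3 channels * tubelet_size frames * patch_size**2 positions per token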
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
"""simple docstring"""
_lowerCAmelCase = copy.deepcopy(__magic_name__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_lowerCAmelCase = torch.ones((self.model_tester.num_masks,) )
_lowerCAmelCase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_lowerCAmelCase = mask.expand(self.model_tester.batch_size , -1 ).bool()
_lowerCAmelCase = bool_masked_pos.to(__magic_name__ )
if return_labels:
if model_class in [
*get_values(__magic_name__ ),
]:
_lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__magic_name__ )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = VideoMAEModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
for model_class in self.all_model_classes:
_lowerCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
_lowerCAmelCase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_lowerCAmelCase = len(__magic_name__ )
# Check attention is always last and order is fine
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + 1 , len(__magic_name__ ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _lowerCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ):
_lowerCAmelCase = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
_lowerCAmelCase = outputs.hidden_states
_lowerCAmelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
_lowerCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
_lowerCAmelCase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def prepare_video():
    """Load the sample 'eating spaghetti' clip from the Hub as a list of frames."""
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset' )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
@slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics').to(
            torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short').to(torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors='pt').to(torch_device)
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos', filename='bool_masked_pos.pt')
        inputs['bool_masked_pos'] = torch.load(local_path)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device)
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short', norm_pix_loss=False).to(
            torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
| 589
| 1
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
pass
def __A ( lowerCamelCase_ ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
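# apply_tesseract OCRs an image and returns parallel lists of words and
# normalized bounding boxes; list(zip(*...)) turns them into (word, box) pairs
# that the pipeline accepts via `word_boxes` in place of a raw image.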
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            """document-question-answering""" , model=model , tokenizer=tokenizer , image_processor=processor )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image) , None , """""" ) ) )
        question = """What is the placebo?"""
        examples = [
{
"""image""": load_image(lowerCamelCase_ ),
"""question""": question,
},
{
"""image""": image,
"""question""": question,
},
{
"""image""": image,
"""question""": question,
"""word_boxes""": word_boxes,
},
]
return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples , top_k=2 )
        self.assertEqual(
            outputs , [
[
{"""score""": ANY(lowerCamelCase_ ), """answer""": ANY(lowerCamelCase_ ), """start""": ANY(lowerCamelCase_ ), """end""": ANY(lowerCamelCase_ )},
{"""score""": ANY(lowerCamelCase_ ), """answer""": ANY(lowerCamelCase_ ), """start""": ANY(lowerCamelCase_ ), """end""": ANY(lowerCamelCase_ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
SCREAMING_SNAKE_CASE : Optional[int] = INVOICE_URL
SCREAMING_SNAKE_CASE : List[Any] = """How many cats are there?"""
SCREAMING_SNAKE_CASE : Union[str, Any] = [
{"""score""": 0.0_001, """answer""": """oy 2312/2019""", """start""": 38, """end""": 39},
{"""score""": 0.0_001, """answer""": """oy 2312/2019 DUE""", """start""": 38, """end""": 40},
]
SCREAMING_SNAKE_CASE : Any = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase_ , decimals=4 ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase_ , decimals=4 ) , lowerCamelCase_ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
SCREAMING_SNAKE_CASE : str = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
SCREAMING_SNAKE_CASE : Optional[Any] = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(lowerCamelCase_ , [] )
# We can optionnally pass directly the words and bounding boxes
SCREAMING_SNAKE_CASE : Optional[int] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Optional[Any] = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , words=lowerCamelCase_ , boxes=lowerCamelCase_ , top_k=2 )
self.assertEqual(lowerCamelCase_ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
SCREAMING_SNAKE_CASE : List[str] = INVOICE_URL
SCREAMING_SNAKE_CASE : Optional[Any] = """What is the invoice number?"""
SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
SCREAMING_SNAKE_CASE : int = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
SCREAMING_SNAKE_CASE : List[Any] = INVOICE_URL
SCREAMING_SNAKE_CASE : Union[str, Any] = """What is the invoice number?"""
SCREAMING_SNAKE_CASE : Optional[int] = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=lowerCamelCase_ , revision="""3dc6de3""" , )
SCREAMING_SNAKE_CASE : Optional[Any] = INVOICE_URL
SCREAMING_SNAKE_CASE : int = """What is the invoice number?"""
SCREAMING_SNAKE_CASE : int = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
SCREAMING_SNAKE_CASE : Any = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
SCREAMING_SNAKE_CASE : int = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
]
]
* 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image) , None , """""" ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=lowerCamelCase_ , revision="""3dc6de3""" , max_seq_len=50 , )
SCREAMING_SNAKE_CASE : int = INVOICE_URL
SCREAMING_SNAKE_CASE : Tuple = """What is the invoice number?"""
SCREAMING_SNAKE_CASE : Dict = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
SCREAMING_SNAKE_CASE : str = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image) , None , """""" ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE : int = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
@slow
@require_torch
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
SCREAMING_SNAKE_CASE : List[str] = INVOICE_URL
SCREAMING_SNAKE_CASE : int = """What is the invoice number?"""
SCREAMING_SNAKE_CASE : Optional[Any] = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase_ , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
| 79
|
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
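# e.g. floats_list((2, 3)) yields a 2x3 nested list of floats drawn uniformly from [0, scale).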
class TvltFeatureExtractionTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int]=7 , lowerCamelCase_ : Optional[int]=4_00 , lowerCamelCase_ : int=20_00 , lowerCamelCase_ : List[str]=20_48 , lowerCamelCase_ : Optional[Any]=1_28 , lowerCamelCase_ : Optional[Any]=1 , lowerCamelCase_ : str=5_12 , lowerCamelCase_ : Dict=30 , lowerCamelCase_ : Dict=4_41_00 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = min_seq_length
SCREAMING_SNAKE_CASE : Any = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : int = spectrogram_length
SCREAMING_SNAKE_CASE : List[Any] = feature_size
SCREAMING_SNAKE_CASE : Any = num_audio_channels
SCREAMING_SNAKE_CASE : Tuple = hop_length
SCREAMING_SNAKE_CASE : str = chunk_length
SCREAMING_SNAKE_CASE : Dict = sampling_rate
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : Any=False ):
'''simple docstring'''
def _flatten(lowerCamelCase_ : Dict ):
return list(itertools.chain(*lowerCamelCase_ ) )
if equal_length:
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : Dict = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(lowerCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """spectrogram_length""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """feature_size""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """num_audio_channels""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """hop_length""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """chunk_length""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """sampling_rate""" ) )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Any = feat_extract_first.save_pretrained(lowerCamelCase_ )[0]
check_json_file_has_correct_format(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : List[Any] = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop("""mel_filters""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , """feat_extract.json""" )
feat_extract_first.to_json_file(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class.from_json_file(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : int = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : List[str] = dict_first.pop("""mel_filters""" )
SCREAMING_SNAKE_CASE : Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 , mask_audio=lowerCamelCase_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
SCREAMING_SNAKE_CASE : int = np.asarray(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Union[str, Any] = ds.sort("""id""" ).select(range(lowerCamelCase_ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : Tuple = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(lowerCamelCase_ , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCamelCase_ , atol=1e-4 ) )
| 79
| 1
|
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
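    # evaluate() and predict() run the bare evaluation loop first (with metric
    # computation disabled), then apply post_process_function to turn raw
    # start/end logits into answer texts before computing metrics.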
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix,)
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size),))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"""{metric_key_prefix}_"""):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix,)
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size),))
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"""{metric_key_prefix}_"""):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 185
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
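    # attribute_map lets the generic names hidden_size / num_attention_heads
    # resolve to the DETR-style config fields d_model / encoder_attention_heads.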
    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def A__ (self):
'''simple docstring'''
return self.encoder_attention_heads
@property
def A__ (self):
'''simple docstring'''
return self.d_model
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =copy.deepcopy(self.__dict__)
__UpperCAmelCase =self.backbone_config.to_dict()
__UpperCAmelCase =self.__class__.model_type
return output
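# Illustrative usage sketch (added for clarity, not part of the original module;
# the argument values below are arbitrary):
#     config = DetaConfig(num_queries=300, two_stage=True, with_box_refine=True)
#     config.num_attention_heads  # -> 8 (alias of encoder_attention_heads)
#     config.hidden_size          # -> 256 (alias of d_model)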
| 132
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 170
|
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 170
| 1
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
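# Illustrative example (added for clarity, not part of the original file):
#     fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#     -> (240.0, [1, 1, 0.6666666666666666])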
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67
|
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps):
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
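# Illustrative denoising-loop sketch (added for clarity, not part of the original
# file). `unet` stands in for any noise-prediction model and is hypothetical here:
#     scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for timestep in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, timestep)
#         noise_pred = unet(model_input, timestep)  # hypothetical model call
#         sample = scheduler.step(noise_pred, timestep, sample).prev_sample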
| 281
| 0
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717
|
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy: int = num
    rev_num: int = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
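# Illustrative examples (added for clarity, not part of the original file):
#     is_palindrome(121)   # -> True
#     is_palindrome(-121)  # -> False (negative numbers are not palindromes here)
#     is_palindrome(10)    # -> False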
if __name__ == "__main__":
import doctest
doctest.testmod()
| 240
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def _a ( lowerCamelCase_="" ):
snake_case : Any =tempfile.mkdtemp()
return os.path.join(lowerCamelCase_ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
snake_case : List[str] =torch.rand(12, dtype=torch.floataa ) - 0.5
snake_case : List[str] =AgentAudio(_snake_case )
snake_case : Any =str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_snake_case, agent_type.to_raw(), atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(_snake_case ) )
# Ensure that the file contains the same value as the original tensor
snake_case , snake_case : List[Any] =sf.read(_snake_case )
self.assertTrue(torch.allclose(_snake_case, torch.tensor(_snake_case ), atol=1E-4 ) )
def __snake_case ( self : int ):
'''simple docstring'''
snake_case : Dict =torch.rand(12, dtype=torch.floataa ) - 0.5
snake_case : Optional[Any] =get_new_path(suffix='''.wav''' )
sf.write(_snake_case, _snake_case, 16_000 )
snake_case : Any =AgentAudio(_snake_case )
self.assertTrue(torch.allclose(_snake_case, agent_type.to_raw(), atol=1E-4 ) )
self.assertEqual(agent_type.to_string(), _snake_case )
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def __snake_case ( self : Dict ):
'''simple docstring'''
snake_case : Any =torch.randint(0, 256, (64, 64, 3) )
snake_case : Union[str, Any] =AgentImage(_snake_case )
snake_case : List[Any] =str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_snake_case, agent_type._tensor, atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw(), Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_snake_case ) )
def __snake_case ( self : List[str] ):
'''simple docstring'''
snake_case : str =Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
snake_case : Optional[int] =Image.open(_snake_case )
snake_case : int =AgentImage(_snake_case )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_snake_case ) )
def __snake_case ( self : Tuple ):
'''simple docstring'''
snake_case : Tuple =Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
snake_case : Any =Image.open(_snake_case )
snake_case : List[str] =AgentImage(_snake_case )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_snake_case ) )
class lowerCAmelCase_ ( unittest.TestCase ):
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
snake_case : Dict ='''Hey!'''
snake_case : Optional[int] =AgentText(_snake_case )
self.assertEqual(_snake_case, agent_type.to_string() )
self.assertEqual(_snake_case, agent_type.to_raw() )
self.assertEqual(_snake_case, _snake_case )
| 349
|
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")
            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
| 349
| 1
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __UpperCAmelCase :
'''simple docstring'''
pass
| 165
|
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
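# Illustrative example (added for clarity, not part of the original file):
#     solution(3, 7, 8)  # -> 2, since 2/5 is the largest fraction below 3/7
#                        #    with a denominator not exceeding 8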
if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
| 165
| 1
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 458
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)
    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
| 458
| 1
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 642
|
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()
    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")
    def empty(self):
        return len(self.elements) == 0
    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))
    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))
    def top_show(self):
        return self.elements[0][1]
    def top(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
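# Illustrative usage (added for clarity, not part of the original file):
#     pq = PriorityQueue()
#     pq.put((0, 0), 3)
#     pq.put((1, 1), 1)
#     pq.minkey()    # -> 1
#     pq.top_show()  # -> (1, 1)
#     pq.top()       # -> (1, (1, 1)), removing it from the queue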
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)
def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t
def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"
    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"
    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"
    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")
            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))
    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 642
| 1
|
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Dict = '''detr'''
UpperCAmelCase_ : List[str] = ['''past_key_values''']
UpperCAmelCase_ : List[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=3 , __lowerCAmelCase=100 , __lowerCAmelCase=6 , __lowerCAmelCase=2048 , __lowerCAmelCase=8 , __lowerCAmelCase=6 , __lowerCAmelCase=2048 , __lowerCAmelCase=8 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase="relu" , __lowerCAmelCase=256 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1.0 , __lowerCAmelCase=False , __lowerCAmelCase="sine" , __lowerCAmelCase="resnet50" , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=1 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=1 , __lowerCAmelCase=1 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=0.1 , **__lowerCAmelCase , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""")
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
lowerCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
elif isinstance(__lowerCAmelCase , __lowerCAmelCase):
lowerCAmelCase = backbone_config.get("""model_type""")
lowerCAmelCase = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase = config_class.from_dict(__lowerCAmelCase)
# set timm attributes to None
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None, None, None
lowerCAmelCase = use_timm_backbone
lowerCAmelCase = backbone_config
lowerCAmelCase = num_channels
lowerCAmelCase = num_queries
lowerCAmelCase = d_model
lowerCAmelCase = encoder_ffn_dim
lowerCAmelCase = encoder_layers
lowerCAmelCase = encoder_attention_heads
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = init_xavier_std
lowerCAmelCase = encoder_layerdrop
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = encoder_layers
lowerCAmelCase = auxiliary_loss
lowerCAmelCase = position_embedding_type
lowerCAmelCase = backbone
lowerCAmelCase = use_pretrained_backbone
lowerCAmelCase = dilation
# Hungarian matcher
lowerCAmelCase = class_cost
lowerCAmelCase = bbox_cost
lowerCAmelCase = giou_cost
# Loss coefficients
lowerCAmelCase = mask_loss_coefficient
lowerCAmelCase = dice_loss_coefficient
lowerCAmelCase = bbox_loss_coefficient
lowerCAmelCase = giou_loss_coefficient
lowerCAmelCase = eos_coefficient
super().__init__(is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase)
@property
def a_ ( self):
"""simple docstring"""
return self.encoder_attention_heads
@property
def a_ ( self):
"""simple docstring"""
return self.d_model
@classmethod
def a_ ( cls , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return cls(backbone_config=__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
lowerCAmelCase = self.backbone_config.to_dict()
lowerCAmelCase = self.__class__.model_type
return output
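# Usage sketch (assuming the upstream `transformers` package, where the class
# above is `DetrConfig`): the `attribute_map` routes generic config names to
# the DETR-specific ones, so `hidden_size` reads `d_model` and
# `num_attention_heads` reads `encoder_attention_heads`.
#
#   from transformers import DetrConfig
#   config = DetrConfig(d_model=256, encoder_attention_heads=8)
#   assert config.hidden_size == 256
#   assert config.num_attention_heads == 8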
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : int = version.parse('''1.11''' )
@property
def a_ ( self):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
])
@property
def a_ ( self):
"""simple docstring"""
return 1E-5
@property
def a_ ( self):
"""simple docstring"""
return 12
| 370
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def snake_case__ ( _A: Dict="no" , _A: str = default_json_config_file , _A: bool = False ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = Path(_A )
path.parent.mkdir(parents=_A , exist_ok=_A )
if path.exists():
print(
f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." )
return False
lowerCAmelCase = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" )
lowerCAmelCase = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
lowerCAmelCase = torch.cuda.device_count()
lowerCAmelCase = num_gpus
lowerCAmelCase = False
if num_gpus > 1:
lowerCAmelCase = """MULTI_GPU"""
else:
lowerCAmelCase = """NO"""
elif is_xpu_available() and use_xpu:
lowerCAmelCase = torch.xpu.device_count()
lowerCAmelCase = num_xpus
lowerCAmelCase = False
if num_xpus > 1:
lowerCAmelCase = """MULTI_XPU"""
else:
lowerCAmelCase = """NO"""
elif is_npu_available():
lowerCAmelCase = torch.npu.device_count()
lowerCAmelCase = num_npus
lowerCAmelCase = False
if num_npus > 1:
lowerCAmelCase = """MULTI_NPU"""
else:
lowerCAmelCase = """NO"""
else:
lowerCAmelCase = 0
lowerCAmelCase = True
lowerCAmelCase = 1
lowerCAmelCase = """NO"""
lowerCAmelCase = ClusterConfig(**_A )
config.to_json_file(_A )
return path
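# Usage sketch (the writer above ships in upstream Accelerate as
# `accelerate.utils.write_basic_config`): one call produces a minimal
# single-machine config file under the Accelerate cache directory.
#
#   from accelerate.utils import write_basic_config
#   write_basic_config(mixed_precision="bf16")  # e.g. ~/.cache/huggingface/accelerate/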
def default_command_parser(parser, parents):
    '''simple docstring'''
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    '''simple docstring'''
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 370
| 1
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : str = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class __magic_name__ ( A__ ):
lowercase : int =VOCAB_FILES_NAMES
lowercase : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : str =PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] =['''input_ids''', '''attention_mask''']
lowercase : List[int] =[]
lowercase : List[int] =[]
def __init__( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int=None , UpperCamelCase__ : int=None , UpperCamelCase__ : int="<s>" , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : Any="</s>" , UpperCamelCase__ : Union[str, Any]="<pad>" , UpperCamelCase__ : List[str]="<unk>" , UpperCamelCase__ : str="m2m100" , UpperCamelCase__ : Optional[Dict[str, Any]] = None , UpperCamelCase__ : Any=8 , **UpperCamelCase__ : int , ) -> None:
'''simple docstring'''
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase = language_codes
UpperCAmelCase = FAIRSEQ_LANGUAGE_CODES[language_codes]
UpperCAmelCase = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
UpperCAmelCase = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(UpperCamelCase__ )
for lang_code in fairseq_language_code
if self.get_lang_token(UpperCamelCase__ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , language_codes=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=UpperCamelCase__ , **UpperCamelCase__ , )
UpperCAmelCase = vocab_file
UpperCAmelCase = load_json(UpperCamelCase__ )
UpperCAmelCase = {v: k for k, v in self.encoder.items()}
UpperCAmelCase = spm_file
UpperCAmelCase = load_spm(UpperCamelCase__ , self.sp_model_kwargs )
UpperCAmelCase = len(self.encoder )
UpperCAmelCase = {
self.get_lang_token(UpperCamelCase__ ): self.encoder_size + i for i, lang_code in enumerate(UpperCamelCase__ )
}
UpperCAmelCase = {lang_code: self.encoder_size + i for i, lang_code in enumerate(UpperCamelCase__ )}
UpperCAmelCase = {v: k for k, v in self.lang_token_to_id.items()}
UpperCAmelCase = src_lang if src_lang is not None else "en"
UpperCAmelCase = tgt_lang
UpperCAmelCase = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
UpperCAmelCase = num_madeup_words
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> int:
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCamelCase__ : str ) -> None:
'''simple docstring'''
UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCamelCase__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCamelCase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(UpperCamelCase__ , self.encoder[self.unk_token] )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCamelCase__ : int ) -> str:
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(UpperCamelCase__ , self.unk_token )
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCamelCase__ : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
UpperCAmelCase = []
else:
current_sub_tokens.append(UpperCamelCase__ )
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string.strip()
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
UpperCAmelCase = [1] * len(self.prefix_tokens )
UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCamelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCamelCase__ )) + ([0] * len(UpperCamelCase__ )) + suffix_ones
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self : List[str] , UpperCamelCase__ : Dict ) -> None:
'''simple docstring'''
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase = {}
UpperCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
UpperCAmelCase = Path(UpperCamelCase__ )
if not save_dir.is_dir():
raise OSError(F'{save_directory} should be a directory' )
UpperCAmelCase = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
UpperCAmelCase = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , UpperCamelCase__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , UpperCamelCase__ )
elif not os.path.isfile(self.spm_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (str(UpperCamelCase__ ), str(UpperCamelCase__ ))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : str = "en" , UpperCamelCase__ : Optional[List[str]] = None , UpperCamelCase__ : str = "ro" , **UpperCamelCase__ : Tuple , ) -> BatchEncoding:
'''simple docstring'''
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : int ) -> Dict:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCAmelCase = src_lang
UpperCAmelCase = self(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , **UpperCamelCase__ )
UpperCAmelCase = self.get_lang_id(UpperCamelCase__ )
UpperCAmelCase = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCamelCase__ : str ) -> None:
'''simple docstring'''
UpperCAmelCase = self.get_lang_token(UpperCamelCase__ )
UpperCAmelCase = self.lang_token_to_id[lang_token]
UpperCAmelCase = [self.cur_lang_id]
UpperCAmelCase = [self.eos_token_id]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : str ) -> None:
'''simple docstring'''
UpperCAmelCase = self.get_lang_token(UpperCamelCase__ )
UpperCAmelCase = self.lang_token_to_id[lang_token]
UpperCAmelCase = [self.cur_lang_id]
UpperCAmelCase = [self.eos_token_id]
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCamelCase__ : str ) -> str:
'''simple docstring'''
return self.lang_code_to_token[lang]
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCamelCase__ : str ) -> int:
'''simple docstring'''
UpperCAmelCase = self.get_lang_token(UpperCamelCase__ )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
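# Usage sketch (assuming the upstream `transformers` M2M100 checkpoints named
# above): the tokenizer prefixes the source text with its language token, and
# `get_lang_id` supplies the id to force as BOS when generating.
#
#   from transformers import M2M100Tokenizer
#   tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   enc = tok("Hello world", return_tensors="pt")  # input_ids start with the __en__ token
#   forced_bos = tok.get_lang_id("fr")             # pass as forced_bos_token_id to generate()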
| 457
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
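# With the `_LazyModule` indirection above, heavy submodules are imported only
# on first attribute access: `from transformers.models.roc_bert import
# RoCBertConfig` resolves the name lazily instead of importing torch-backed
# modeling code up front.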
| 457
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_UpperCamelCase : Optional[int] = logging.get_logger(__name__)
_UpperCamelCase : int = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Dict = "blenderbot-small"
lowerCamelCase__ : Optional[int] = ["past_key_values"]
lowerCamelCase__ : Union[str, Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , a=5_0_2_6_5 , a=5_1_2 , a=8 , a=2_0_4_8 , a=1_6 , a=8 , a=2_0_4_8 , a=1_6 , a=0.0 , a=0.0 , a=True , a=True , a="gelu" , a=5_1_2 , a=0.1 , a=0.0 , a=0.0 , a=0.02 , a=1 , a=False , a=0 , a=1 , a=2 , a=2 , **a , ) -> List[Any]:
lowercase__ : str = vocab_size
lowercase__ : str = max_position_embeddings
lowercase__ : Any = d_model
lowercase__ : int = encoder_ffn_dim
lowercase__ : List[str] = encoder_layers
lowercase__ : Dict = encoder_attention_heads
lowercase__ : List[str] = decoder_ffn_dim
lowercase__ : Tuple = decoder_layers
lowercase__ : Dict = decoder_attention_heads
lowercase__ : List[Any] = dropout
lowercase__ : Union[str, Any] = attention_dropout
lowercase__ : Tuple = activation_dropout
lowercase__ : str = activation_function
lowercase__ : Any = init_std
lowercase__ : str = encoder_layerdrop
lowercase__ : int = decoder_layerdrop
lowercase__ : Tuple = use_cache
lowercase__ : Dict = encoder_layers
lowercase__ : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , forced_eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
class UpperCAmelCase_ ( _a):
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowercase__ : List[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
lowercase__ : Tuple = {0: 'batch'}
lowercase__ : Optional[int] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ : List[Any] = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ : List[Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase_ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase__ : str = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
lowercase__ , lowercase__ : Optional[int] = self.num_layers
for i in range(UpperCamelCase_ ):
lowercase__ : Optional[int] = {0: 'batch', 2: 'past_sequence + sequence'}
lowercase__ : List[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
else:
lowercase__ : str = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowercase__ : int = super().outputs
else:
lowercase__ : str = super(UpperCamelCase_ , self ).outputs
if self.use_past:
lowercase__ , lowercase__ : Any = self.num_layers
for i in range(UpperCamelCase_ ):
lowercase__ : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'}
lowercase__ : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _UpperCAmelCase ( self , a , a = -1 , a = -1 , a = False , a = None , ) -> Mapping[str, Any]:
lowercase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Generate decoder inputs
lowercase__ : int = seq_length if not self.use_past else 1
lowercase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ : Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
lowercase__ : Optional[Any] = dict(**UpperCamelCase_ , **UpperCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowercase__ , lowercase__ : List[Any] = common_inputs['input_ids'].shape
lowercase__ : Union[str, Any] = common_inputs['decoder_input_ids'].shape[1]
lowercase__ , lowercase__ : Union[str, Any] = self.num_attention_heads
lowercase__ : int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase__ : List[Any] = decoder_seq_length + 3
lowercase__ : Optional[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowercase__ : List[Any] = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase_ , UpperCamelCase_ )] , dim=1 )
lowercase__ : str = []
        # If the numbers of encoder and decoder layers are present in the model configuration, both are considered
lowercase__ , lowercase__ : Dict = self.num_layers
lowercase__ : List[Any] = min(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ : Tuple = max(UpperCamelCase_ , UpperCamelCase_ ) - min_num_layers
lowercase__ : List[str] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
) )
# TODO: test this.
lowercase__ : str = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase_ , UpperCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) )
return common_inputs
def _UpperCAmelCase ( self , a , a = -1 , a = -1 , a = False , a = None , ) -> Mapping[str, Any]:
lowercase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowercase__ , lowercase__ : List[Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
lowercase__ : List[Any] = seqlen + 2
lowercase__ , lowercase__ : Any = self.num_layers
lowercase__ , lowercase__ : int = self.num_attention_heads
lowercase__ : Optional[int] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase__ : Dict = common_inputs['attention_mask'].dtype
lowercase__ : Tuple = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_ )] , dim=1 )
lowercase__ : List[Any] = [
(torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(UpperCamelCase_ )
]
return common_inputs
def _UpperCAmelCase ( self , a , a = -1 , a = -1 , a = False , a = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase__ : Union[str, Any] = compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase__ : List[Any] = tokenizer.num_special_tokens_to_add(UpperCamelCase_ )
lowercase__ : Union[str, Any] = compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
lowercase__ : List[str] = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowercase__ : int = dict(tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ ) )
return common_inputs
def _UpperCAmelCase ( self , a , a = -1 , a = -1 , a = False , a = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
            lowercase__ : Dict = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
elif self.task == "causal-lm":
lowercase__ : int = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
else:
lowercase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
return common_inputs
def _UpperCAmelCase ( self , a , a , a , a ) -> List[str]:
if self.task in ["default", "seq2seq-lm"]:
lowercase__ : Any = super()._flatten_past_key_values_(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
else:
lowercase__ : Any = super(UpperCamelCase_ , self )._flatten_past_key_values_(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
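# A sketch of how the dummy-input generators above are driven during ONNX
# export (assuming the upstream `transformers` names BlenderbotSmallConfig /
# BlenderbotSmallOnnxConfig for the two classes):
#
#   from transformers import AutoTokenizer, BlenderbotSmallConfig
#   from transformers.utils import TensorType
#   config = BlenderbotSmallConfig()
#   onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
#   tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   # dummy holds fixed-size input_ids / attention_mask / decoder_* tensors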
| 599
|
'''simple docstring'''
import enum
import shutil
import sys

TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
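# Usage sketch: write a green "OK" and then wipe the line (32 is the ANSI
# foreground code for green).
#
#   writeColor("OK", 32)
#   forceWrite("\n")
#   move_cursor(num_lines=1, direction="UP")
#   clear_line()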
| 368
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self ,_A ,_A=7 ,_A=3 ,_A=30 ,_A=400 ,_A=True ,_A=None ,_A=0.9 ,_A=None ,_A=True ,_A=[0.5, 0.5, 0.5] ,_A=[0.5, 0.5, 0.5] ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = size if size is not None else {'shortest_edge': 30}
_lowerCAmelCase : Any = crop_size if crop_size is not None else {'height': 30, 'width': 30}
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : int = num_channels
_lowerCAmelCase : str = min_resolution
_lowerCAmelCase : List[str] = max_resolution
_lowerCAmelCase : Any = do_resize_and_center_crop
_lowerCAmelCase : List[str] = size
_lowerCAmelCase : List[Any] = crop_pct
_lowerCAmelCase : Optional[Any] = crop_size
_lowerCAmelCase : Optional[Any] = do_normalize
_lowerCAmelCase : Optional[int] = image_mean
_lowerCAmelCase : Optional[int] = image_std
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = PoolFormerImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A ,'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(_A ,'size' ) )
self.assertTrue(hasattr(_A ,'crop_pct' ) )
self.assertTrue(hasattr(_A ,'do_normalize' ) )
self.assertTrue(hasattr(_A ,'image_mean' ) )
self.assertTrue(hasattr(_A ,'image_std' ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size ,{'height': 30, 'width': 30} )
_lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A ,Image.Image )
        # Test non-batched input
_lowerCAmelCase : Dict = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
_lowerCAmelCase : Optional[Any] = image_processing(_A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_A ,numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A ,np.ndarray )
        # Test non-batched input
_lowerCAmelCase : Dict = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
_lowerCAmelCase : Any = image_processing(_A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_A ,torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A ,torch.Tensor )
        # Test non-batched input
_lowerCAmelCase : List[str] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
_lowerCAmelCase : List[Any] = image_processing(_A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
| 16
|
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        '''simple docstring'''
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        '''simple docstring'''
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        '''simple docstring'''
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # extend the message schedule with 48 zeroed slots
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # fill in the schedule slots beyond the first 16 words
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000
                a, b, c, d, e, f, g, h = (
                    ((temp1 + temp2) % 0x1_0000_0000),
                    a,
                    b,
                    c,
                    ((d + temp1) % 0x1_0000_0000),
                    e,
                    f,
                    g,
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        '''simple docstring'''
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        '''simple docstring'''
        import hashlib

        data = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(data).hash, hashlib.sha256(data).hexdigest())
def main() -> None:
    '''simple docstring'''
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
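# Quick worked check against the standard library (the class above mirrors
# hashlib's SHA-256 for byte strings):
#
#   import hashlib
#   assert SHA256(b"abc").hash == hashlib.sha256(b"abc").hexdigest()
#   # -> "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"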
| 16
| 1
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class snake_case ( UpperCamelCase_ ):
lowercase_ = 'ernie_m'
lowercase_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : Optional[Any] , a_ : int = 25_0002 , a_ : int = 768 , a_ : int = 12 , a_ : int = 12 , a_ : int = 3072 , a_ : str = "gelu" , a_ : float = 0.1 , a_ : float = 0.1 , a_ : int = 514 , a_ : float = 0.02 , a_ : int = 1 , a_ : float = 1e-0_5 , a_ : List[Any]=None , a_ : Dict=False , a_ : Union[str, Any]=0.0 , **a_ : List[Any] , )-> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=a_ , **a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : str = hidden_act
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : str = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict = classifier_dropout
SCREAMING_SNAKE_CASE__ : List[Any] = is_decoder
SCREAMING_SNAKE_CASE__ : Optional[Any] = act_dropout
| 85
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
class A_ ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = """timm_backbone"""
def __init__( self :Any , lowerCamelCase_ :int=None , lowerCamelCase_ :Optional[int]=3 , lowerCamelCase_ :int=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Union[str, Any]=None , **lowerCamelCase_ :Optional[int] , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =backbone
lowerCamelCase__ : List[Any] =num_channels
lowerCamelCase__ : Tuple =features_only
lowerCamelCase__ : Dict =use_pretrained_backbone
lowerCamelCase__ : Optional[int] =True
lowerCamelCase__ : Optional[int] =out_indices if out_indices is not None else (-1,)
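# Usage sketch (assuming upstream `transformers`, where this config drives
# `TimmBackbone`, a thin wrapper that exposes a timm model's feature maps):
#
#   from transformers import TimmBackboneConfig, TimmBackbone
#   config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
#   model = TimmBackbone(config)  # requires the `timm` package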
| 174
| 0
|
"""simple docstring"""
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __lowercase ( snake_case__ ):
'''simple docstring'''
__lowerCAmelCase = '''microsoft/speecht5_tts'''
__lowerCAmelCase = (
'''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
'''text to read (in English) and returns a waveform object containing the sound.'''
)
__lowerCAmelCase = '''text_reader'''
__lowerCAmelCase = SpeechTaProcessor
__lowerCAmelCase = SpeechTaForTextToSpeech
__lowerCAmelCase = SpeechTaHifiGan
__lowerCAmelCase = ['''text''']
__lowerCAmelCase = ['''audio''']
def _lowerCamelCase ( self ):
if self.post_processor is None:
__a : Dict = "microsoft/speecht5_hifigan"
super().setup()
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
__a : Tuple = self.pre_processor(text=lowercase_ , return_tensors='''pt''' , truncation=lowercase_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
__a : Optional[Any] = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
__a : List[Any] = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def _lowerCamelCase ( self , _UpperCAmelCase ):
with torch.no_grad():
return self.model.generate_speech(**lowercase_ )
def _lowerCamelCase ( self , _UpperCAmelCase ):
with torch.no_grad():
return self.post_processor(lowercase_ ).cpu().detach()
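# Usage sketch (assuming the agent/tool API this plugs into; upstream exports
# the class above as `TextToSpeechTool`):
#
#   tool = TextToSpeechTool()
#   waveform = tool("Hello, world.")  # 1-D float tensor of 16 kHz SpeechT5 audio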
| 704
|
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    '''simple docstring'''

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_dir, filename)

    edges: dict[EdgeT, int] = {}
    data: list[str]

    with open(filepath) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge1, edge2)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
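# Worked miniature: vertices {0, 1, 2} with edges (0,1)=5, (1,2)=3, (0,2)=10.
# Prim's algorithm keeps (0,1) and (1,2), so the saving is 18 - 8 = 10.
#
#   g = Graph({0, 1, 2}, {(0, 1): 5, (1, 2): 3, (0, 2): 10})
#   assert sum(g.edges.values()) - sum(g.prims_algorithm().edges.values()) == 10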
| 101
| 0
|
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
__a = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = "maskformer"
lowercase = {"hidden_size": "mask_feature_size"}
lowercase = ["resnet", "swin"]
lowercase = ["detr"]
def __init__( self : Union[str, Any] , snake_case_ : int = 256 , snake_case_ : int = 256 , snake_case_ : float = 0.1 , snake_case_ : bool = False , snake_case_ : Optional[Dict] = None , snake_case_ : Optional[Dict] = None , snake_case_ : float = 0.02 , snake_case_ : float = 1.0 , snake_case_ : float = 1.0 , snake_case_ : float = 1.0 , snake_case_ : float = 20.0 , snake_case_ : Optional[bool] = None , **snake_case_ : Tuple , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
snake_case__ : Dict = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(snake_case_ , snake_case_ ):
snake_case__ : Any = backbone_config.pop("""model_type""" )
snake_case__ : Dict = CONFIG_MAPPING[backbone_model_type]
snake_case__ : List[str] = config_class.from_dict(snake_case_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
f"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
snake_case__ : List[Any] = DetrConfig()
else:
# verify that the decoder is supported
snake_case__ : int = (
decoder_config.pop("""model_type""" ) if isinstance(snake_case_ , snake_case_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"Transformer Decoder {decoder_type} not supported, please use one of"
f" {','.join(self.decoders_supported )}" )
if isinstance(snake_case_ , snake_case_ ):
snake_case__ : List[Any] = CONFIG_MAPPING[decoder_type]
snake_case__ : Union[str, Any] = config_class.from_dict(snake_case_ )
snake_case__ : Optional[int] = backbone_config
snake_case__ : Optional[Any] = decoder_config
# main feature dimension for the model
snake_case__ : int = fpn_feature_size
snake_case__ : List[str] = mask_feature_size
# initializer
snake_case__ : Dict = init_std
snake_case__ : List[str] = init_xavier_std
# Hungarian matcher && loss
snake_case__ : Optional[Any] = cross_entropy_weight
snake_case__ : Union[str, Any] = dice_weight
snake_case__ : List[Any] = mask_weight
snake_case__ : List[Any] = use_auxiliary_loss
snake_case__ : Tuple = no_object_weight
snake_case__ : Optional[int] = output_auxiliary_logits
snake_case__ : Union[str, Any] = self.decoder_config.encoder_attention_heads
snake_case__ : List[Any] = self.decoder_config.num_hidden_layers
super().__init__(**snake_case_ )
@classmethod
def lowerCamelCase ( cls : Optional[int] , snake_case_ : PretrainedConfig , snake_case_ : PretrainedConfig , **snake_case_ : Optional[Any] ):
return cls(
backbone_config=snake_case_ , decoder_config=snake_case_ , **snake_case_ , )
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : str = copy.deepcopy(self.__dict__ )
snake_case__ : int = self.backbone_config.to_dict()
snake_case__ : Optional[int] = self.decoder_config.to_dict()
snake_case__ : Union[str, Any] = self.__class__.model_type
return output
| 374
|
'''simple docstring'''
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"{solution() = }")
| 374
| 1
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
        cluster = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 717
|
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """simple docstring"""

    def __init__(self, img, dst_width: int, dst_height: int):
        """simple docstring"""
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        """simple docstring"""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """simple docstring"""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """simple docstring"""
        return int(self.ratio_y * y)
if __name__ == "__main__":
lowerCAmelCase_ , lowerCAmelCase_ = 800, 600
lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1)
lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
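# A vectorized sketch of the same nearest-neighbour mapping (assumes `im`, `dst_w`, `dst_h`
# as above); NumPy advanced indexing replaces the two Python loops in `process`:
#   ys = (np.arange(dst_h) * im.shape[0] / dst_h).astype(int)
#   xs = (np.arange(dst_w) * im.shape[1] / dst_w).astype(int)
#   resized = im[ys[:, None], xs[None, :]]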
| 635
| 0
|
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
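# Both terms also have closed forms, so the loop can be avoided entirely (same `n`):
#   sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
#   square_of_sum = (n * (n + 1) // 2) ** 2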
if __name__ == "__main__":
print(F'''{solution() = }''')
| 401
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
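# Usage sketch: the option registered above lets a run request report files by id, e.g.
#   pytest --make-reports=my_run tests/
# (the exact report contents are defined in transformers.testing_utils)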
| 401
| 1
|
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor):
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with a convolutional projection."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group Normalization with a single group; input shape is [B, C, H, W]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtracting the input makes this a "pooling residual", as in the PoolFormer paper.
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
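# Minimal inference sketch (assumes the `sail/poolformer_s12` checkpoint is reachable on the Hub;
# not part of the modeling file itself):
#   from transformers import AutoImageProcessor
#   from PIL import Image
#   import requests
#   image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   with torch.no_grad():
#       logits = model(**processor(images=image, return_tensors="pt")).logits
#   print(model.config.id2label[logits.argmax(-1).item()])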
| 155
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor and restore it afterwards."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
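# Usage sketch: wrap long-running console output so the cursor is restored even on error:
#   with hide():
#       render_progress()  # hypothetical long-running call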
| 155
| 1
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar `item` is to the target by counting matching positions."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
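# e.g. evaluate("abcde", "abcce") returns ("abcde", 4.0): four of the five positions match.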
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of the child."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed a parent against random members of the scored population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
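# e.g. a parent with a normalized score of 0.07 yields int(0.07 * 100) + 1 = 8 children
# before mutation, while scores of 0.09 and above hit the 10-child cap.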
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #       max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
_lowercase : Optional[int] = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
_lowercase : Union[str, Any] = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
_lowercase ,_lowercase ,_lowercase : Optional[int] = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 49
|
"""simple docstring"""
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
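# Worked example: encrypt_message("ABC", "HELLO") -> "HFNLP"
# (H+0=H, E+1=F, L+2=N, L+0=L, O+1=P), and decrypt_message("ABC", "HFNLP") restores "HELLO".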
| 182
| 0
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'\s+')
def get_hash(example):
    """Get the hash of a whitespace-normalized copy of the file contents."""
    return {"hash": hashlib.md5(re.sub(PATTERN, '', example['content']).encode('utf-8')).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of the file."""
    line_lengths = [len(line) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters."""
    alpha_frac = np.mean([c.isalnum() for c in example['content']])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example['hash'])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the first few lines mention that the file is auto-generated."""
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Flag configuration files and unit tests: first by keywords in the leading lines,
    then by counting 'config'/'test' occurrences against a threshold proportional to file size."""
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n')
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count('config')
        count_test += line.lower().count('test')
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a Python file contains none of: function, class, for loop, while loop."""
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the symbol '=' fewer than `minimum` times."""
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=')
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example['content'], truncation=False)['input_ids']
    ratio = len(example['content']) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill the cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with heuristics; config and test files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, 'rb') as f_in:
        with gzip.open(str(file_path) + '.gz', 'wb', compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 195
|
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between lines beginning with `start_prompt` and `end_prompt`,
    trimming surrounding empty lines."""
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == '✅' or text == '❌' else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace('Config', '') for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith('Tokenizer'):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('TokenizerFast'):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = '|' + '|'.join([_center_text(c, w) for c, w in zip(columns, widths)]) + '|\n'
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths]) + "|\n"
    check = {True: '✅', False: '❌'}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, 'index.md'),
        start_prompt='<!--This table is updated automatically from the auto modules',
        end_prompt='<!-- End table-->',
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, 'index.md'), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.'
            )
if __name__ == "__main__":
lowerCAmelCase: Dict = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowerCAmelCase: Any = parser.parse_args()
check_model_table(args.fix_and_overwrite)
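# As noted at the top of the file, run from the repo root:
#   python utils/check_table.py                      # check only
#   python utils/check_table.py --fix_and_overwrite  # rewrite the table in docs/source/en/index.md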
| 195
| 1
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.']):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split('adaptor.')[-1]
    items = name.split('.')

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
                adapter.proj.bias.data = value
                logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
                adapter.proj.weight.data = value
                logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"""Adapter layer {layer_id} weight was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak the fairseq model's weights to the transformers design."""
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            'config_yaml': config_yaml_path,
            'data': '/'.join(dict_path.split('/')[:-1]),
            'w2v_path': checkpoint_path,
            'load_pretrained_decoder_from': None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = 'mbart50'
    config["feature_extractor_type"] = 'wav2vec2'
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
_lowerCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
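# Usage sketch (the script name and paths are illustrative):
#   python convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py \
#       --checkpoint_path ./xls_r_mbart50.pt --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml --pytorch_dump_folder_path ./converted_model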
| 10
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_with_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
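# Standalone usage sketch mirroring the denoising loop tested above (`model` stands in for
# any consistency model):
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample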
| 256
| 0
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
    @unittest.skip("TimmBackbone models don't have inputs_embeds")
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
    @unittest.skip("TimmBackbone models don't have inputs_embeds")
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
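    # Note: the slice comparison above verifies incremental decoding: feeding only the new
    # tokens together with the cached past_key_values must reproduce the logits obtained
    # from a full forward pass over the concatenated sequence (up to numerical tolerance).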
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
"""simple docstring"""
# prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
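
# For reference, the kernel above is the real part of the standard Gabor function
#   g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
# with x' = x*cos(theta) + y*sin(theta) and y' = -x*sin(theta) + y*cos(theta).
# Minimal sanity check (illustrative): with psi = 0 the centre element is always 1,
# since x' = y' = 0 there.
#   kernel = gabor_filter_kernel(3, 8, 0, 10, 0, 0)
#   assert kernel.shape == (3, 3) and kernel[1, 1] == 1.0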
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
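        # With the defaults above: frequency_out_dimension = (16 - 2) // 2 + 1 = 8 and
        # time_out_dimension = (24 - 2) // 2 + 1 = 12, so num_patches = 96 and seq_length = 98.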
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels
    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
    def test_inputs_embeds(self):
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
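        # The checkpoint is fine-tuned on AudioSet, which has 527 classes, hence the (1, 527) logits below.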
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
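        # Sanity of the offset arithmetic (illustrative): regular SentencePiece piece ids are
        # shifted by tokenizer.offset (103), so the sp model's <unk> (piece id 2) lands at
        # 2 + 103 == 105, which is exactly what the unk_token_id assert above checks.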
@require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def snake_case__ ( self):
# fmt: off
UpperCAmelCase__ : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
@require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        src_text = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(src_text).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
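
# Each iteration replaces every segment with four shorter ones, so the number of
# segments grows as 3 * 4**steps: iterate(INITIAL_VECTORS, 5) below produces
# 3 * 4**5 = 3072 segments.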
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
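
# rotate applies the standard 2-D rotation matrix [[cos a, -sin a], [sin a, cos a]];
# for example, rotate(numpy.array([1, 0]), 90) is approximately numpy.array([0, 1]).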
def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
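        # Shifting start_time forward by the JIT compilation time excludes compilation
        # from the throughput numbers computed by speed_metrics below.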
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[Any]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result
    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)
    def __mul__(self, another: float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
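    # Sherman-Morrison: if this matrix holds A^(-1), the method above returns
    #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
    # without ever inverting (A + u v^T) directly.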
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
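    # Illustrative example of the renaming performed below (path helpers plus the meta_path
    # substitution), e.g.:
    #   "encoder.down.0.block.0.norm1.weight" -> "encoder.down_blocks.0.resnets.0.norm1.weight"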
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted diffusers VAE to.")

    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
lowerCamelCase__ : Any = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
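    # Both invocation styles therefore work (illustrative; script and model names are examples):
    #   python run_tabfact_with_tapex.py args.json
    #   python run_tabfact_with_tapex.py --model_name_or_path microsoft/tapex-base --do_train ...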
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Padding strategy
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
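# --- Added illustration (not part of the original script) -------------------
# How a TabFact-style table string, with "#" separating cells and newlines
# separating rows, becomes the pandas.DataFrame that the TAPEX tokenizer
# consumes. The sample string is made up for demonstration:
#
#     import pandas as pd
#
#     sample = "year#city\n2008#Beijing\n2012#London"
#     rows = [line.split("#") for line in sample.strip("\n").split("\n")]
#     df = pd.DataFrame.from_records(rows[1:], columns=rows[0])
#     # df now has columns ["year", "city"] and two data rows.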
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    """Wraps a BLIP image processor and a tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
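# Usage sketch (my addition; the checkpoint name is illustrative, not taken
# from this file):
#
#     from PIL import Image
#
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")
#     # inputs now holds pixel_values from the image processor plus the
#     # tokenized text fields, ready to feed to a BLIP-2 style model.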
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler()
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        callback_fn.has_been_called = False
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
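# The tests above all share one pattern: run the pipeline, take the 3x3 corner
# of the last channel, and compare it against a stored reference. A minimal
# sketch of that check (the reference values used in practice come from a
# trusted run; this helper is my addition, not part of the original file):
def _check_image_slice(image, expected_slice, tol=1e-2):
    image_slice = image[0, -3:, -3:, -1]
    assert np.abs(image_slice.flatten() - expected_slice).max() < tol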
class OverFlowError(Exception):
    """Raised when enqueueing into a queue that is already at maximum capacity."""


class UnderFlowError(Exception):
    """Raised when dequeueing from an empty queue."""
class FixedPriorityQueue:
    """Queue with three fixed priority levels; a lower number means a higher priority."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """Queue in which the smallest element has the highest priority."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue():
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 1_0 )
fpq.enqueue(1 , 7_0 )
fpq.enqueue(0 , 1_0_0 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 6_4 )
fpq.enqueue(0 , 1_2_8 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue():
    epq = ElementPriorityQueue()
epq.enqueue(1_0 )
epq.enqueue(7_0 )
epq.enqueue(1_0_0 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(6_4 )
epq.enqueue(1_2_8 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
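# For comparison, the standard library gives O(log n) enqueue/dequeue instead
# of the O(n) list scans above. A minimal sketch using heapq (my addition,
# not part of the original module):
import heapq


class HeapPriorityQueue:
    def __init__(self):
        self._heap = []
        self._count = 0  # tie-breaker that preserves FIFO order within a priority

    def enqueue(self, priority: int, data: int) -> None:
        heapq.heappush(self._heap, (priority, self._count, data))
        self._count += 1

    def dequeue(self) -> int:
        if not self._heap:
            raise UnderFlowError("The queue is empty")
        return heapq.heappop(self._heap)[2]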
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batch, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))
    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))
    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords
    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)
    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height, x_fov=self.x_fov, y_fov=self.y_fov, shape=self.shape, )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(), x=torch.from_numpy(np.stack(xs, axis=0)).float(), y=torch.from_numpy(np.stack(ys, axis=0)).float(), z=torch.from_numpy(np.stack(zs, axis=0)).float(), width=size, height=size, x_fov=0.7, y_fov=0.7, shape=(1, len(xs)), )
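# Usage sketch (my addition): build the 20-view pan rig defined above and
# inspect the resulting ray bundle.
#
#     cameras = create_pan_cameras(size=64)
#     rays = cameras.camera_rays
#     # rays has shape (1, 20 * 64 * 64, 2, 3): for every view and pixel,
#     # one ray origin and one unit direction.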
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
"GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"GraphormerForGraphClassification",
"GraphormerModel",
"GraphormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electrical_conductivity(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
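# Worked example (my addition; values are illustrative): with conductivity as
# the unknown, sigma = q * n * mu gives
#
#     >>> ELECTRON_CHARGE * 1e20 * 1.2   # n = 1e20 / m^3, mu = 1.2 m^2/(V*s)
#     19.2252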
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
def solution():
    """Returns the total of all the name scores in the file."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
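# Worked example: the name "COLIN" is worth 3 + 15 + 12 + 9 + 14 = 53; if it
# were the 938th name alphabetically, its contribution to the total would be
# 938 * 53 = 49714.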
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
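# --- Added illustration (not part of the original script) -------------------
# The converter relies on flax.traverse_util.flatten_dict turning a nested
# parameter tree into tuple keys, which it then joins with "/":
#
#     _nested = {"encoder": {"layers_0": {"attention": {"query": {"kernel": 0}}}}}
#     traverse_util.flatten_dict(_nested)
#     # -> {("encoder", "layers_0", "attention", "query", "kernel"): 0}
#     {"/".join(k): v for k, v in traverse_util.flatten_dict(_nested).items()}
#     # -> {"encoder/layers_0/attention/query/kernel": 0}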
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
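# Minimal sketch of a concrete subcommand built on the ABC above (names are
# illustrative, not from this file; `parser` here is the sub-parsers action
# obtained from ArgumentParser.add_subparsers()):
#
#     class EnvCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser):
#             env_parser = parser.add_parser("env")
#             env_parser.set_defaults(func=lambda args: EnvCommand())
#
#         def run(self):
#             print("environment info goes here")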
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kernel function: calculates rouge between two files and optionally saves the scores."""
    output_lns = [x.strip() for x in open(pred_path).readlines()]
    reference_lns = [x.strip() for x in open(tgt_path).readlines()][: len(output_lns)]
    metrics = calculate_rouge(output_lns, reference_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
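# Usage sketch (file names are placeholders): fire exposes the function as a
# CLI, mapping positional and keyword arguments to command-line arguments:
#
#     python rouge_cli.py predictions.txt targets.txt --save_path=metrics.json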
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Finds the maximum sum of non-adjacent elements of nums."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
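# Worked example: for nums = [3, 2, 7, 10] the maximum sum of non-adjacent
# elements is 3 + 10 = 13, and for [3, 2, 5, 10, 7] it is 3 + 5 + 7 = 15.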
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, 'question_encoder_tokenizer')
        generator_path = os.path.join(save_directory, 'generator_tokenizer')
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop('config', None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder='question_encoder_tokenizer')
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder='generator_tokenizer')
        return cls(question_encoder=question_encoder, generator=generator)
    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(self, src_texts, tgt_texts=None, max_length=None, max_target_length=None, padding="longest", return_tensors=None, truncation=True, **kwargs) -> BatchEncoding:
        warnings.warn(
            '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
            'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
            'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
            'details', FutureWarning, )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, )
        model_inputs['labels'] = labels['input_ids']
        return model_inputs
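# Usage sketch (my addition; the checkpoint name and generated_ids are
# illustrative):
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#     answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)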
'''simple docstring'''
import os
SYMBOLS = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Converts a numeral (possibly not in the minimal form) into an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generates the string of roman numerals for a given integer in minimal form."""
    numerals = ''
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Calculates the total character savings when writing all numerals in their minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
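# Worked example: "XXXXVIIII" parses to 49 and is regenerated in its minimal
# form as "XLIX", a saving of 5 characters.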
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path , config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = ReformerConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = ReformerModelWithLMHead(config )

    with open(trax_model_pkl_path , "rb" ) as f:
        model_weights = pickle.load(f )["weights"]

    set_model_weights_in_torch(model_weights , model , config.hidden_size )

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
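# Hedged usage sketch (added; the file names are hypothetical, only the flags
# come from the argparse definition below):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin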
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 717
|
"""simple docstring"""
import math
def perfect_square( num : int ) -> bool:
    '''simple docstring'''
    return math.sqrt(num ) * math.sqrt(num ) == num


def perfect_square_binary_search( n : int ) -> bool:
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
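# Quick sanity check (added sketch for the two helpers above):
#   perfect_square(9)                -> True
#   perfect_square_binary_search(16) -> True
#   perfect_square_binary_search(15) -> False
# The binary-search variant avoids float round-off on very large integers.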
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681
| 0
|
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
UpperCamelCase__ = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
    return AutoConfig.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ )


@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
    return AutoTokenizer.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ )


@add_start_docstrings(AutoModel.__doc__ )
def model( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
    return AutoModel.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ )


@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
    return AutoModelForCausalLM.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ )


@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
    return AutoModelForMaskedLM.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ )


@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
    return AutoModelForSequenceClassification.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ )


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
    return AutoModelForQuestionAnswering.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 75
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester :
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1_000 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
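        # Added illustration: with the defaults above, image_size=4 and
        # patch_size=2 give (4 // 2) ** 2 + 1 = 5 image tokens, so
        # seq_length = 7 + 5 = 12 for the dummy inputs built below.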
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask ):
        '''simple docstring'''
        model = TFLayoutLMvaModel(config=config )

        # text + image
        result = model(input_ids , pixel_values=pixel_values , training=False )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , training=False , )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , training=False )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        # text only
        result = model(input_ids , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )

        # image only
        result = model({'''pixel_values''': pixel_values} , training=False )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config )

        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , training=False , )

        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config )

        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )

        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        '''simple docstring'''
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config )

        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        '''simple docstring'''
        return True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict )

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(v , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict['''labels'''] = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict['''start_positions'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                inputs_dict['''end_positions'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ):
                inputs_dict['''labels'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ):
                inputs_dict['''labels'''] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )

        return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFLayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_loss_computation( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            if getattr(model , '''hf_compute_loss''' , None ):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=True )[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop('''input_ids''' )

                loss = model(input_ids , **prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop('''input_ids''' )
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['''labels'''].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class['''labels'''] = tf.convert_to_tensor(labels )
                        loss = model(input_ids , **prepared_for_class )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                loss = model(prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call ).parameters
                signature_names = list(signature.keys() )

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: '''input_ids'''}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key )
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input )

                # Send to model
                loss = model(tuple_input[:-1] )[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def test_model_various_embeddings( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def test_for_sequence_classification( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
    def test_for_token_classification( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels )
    def test_for_question_answering( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
    @slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='''tf''' ).pixel_values

        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )

        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )

        expected_slice = tf.constant(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
| 75
| 1
|
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = '''pytorch_model.bin'''
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""} )
    cache_dir: Optional[str] = dataclasses.field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""} )
    infer_file: str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""} )
    eval_file: Optional[str] = dataclasses.field(
        default=None , metadata={"""help""": """A csv or a json file containing the validation data."""} )
    task_name: Optional[str] = dataclasses.field(
        default=None , metadata={"""help""": """The name of the task to train on."""} , )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None , metadata={"""help""": """The list of labels for the task."""} )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""} )
    eval_metric: Optional[str] = dataclasses.field(
        default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="""no""" , metadata={
            """help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch\"]"""
        } , )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=1_0 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={
            """help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
        } , )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=1_0_0 , metadata={"""help""": """Maximum number of self-training iterations."""} , )
    seed: Optional[int] = dataclasses.field(
        default=None , metadata={"""help""": """Random seed for initialization."""} , )
def create_pseudo_labeled_data( args , infer_input , infer_output , eval_result , id2label , next_data_dir ):
    '''simple docstring'''
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example : example["probability"] > args.confidence_threshold )

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset ) )
        print(num_selected_rows )
        dataset = dataset.sort("probability" , reverse=True )
        dataset = dataset.select(range(num_selected_rows ) )

    dataset = dataset.remove_columns(["label", "probability"] )
    dataset = dataset.rename_column("prediction" , "label" )
    dataset = dataset.map(lambda example : {"label": id2label[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )

    pseudo_labeled_data_file = os.path.join(next_data_dir , f'''train_pseudo.{args.data_file_extension}''' )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
def selftrain( model_name_or_path , train_file , infer_file , output_dir , **kwargs ):
    '''simple docstring'''
    accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path )
    data_args = STDataArguments(train_file=train_file , infer_file=infer_file )
    training_args = STTrainingArguments(output_dir=output_dir )
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class ).items():
            setattr(args , key , value )

    for key, value in kwargs.items():
        if hasattr(args , key ):
            setattr(args , key , value )

    # Sanity checks
    data_files = {}
    args.data_file_extension = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split("." )[-1]
        assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
    data_dir_format = f'''{args.output_dir}/self-train_iter-{{}}'''.format
    data_dir = data_dir_format(0 )

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir , exist_ok=True )
            os.makedirs(data_dir , exist_ok=True )
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
    for iteration in range(0 , int(args.max_selftrain_iterations ) ):
        current_data_dir = data_dir_format(iteration )
        assert os.path.exists(current_data_dir )

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir , "stage-1" )
        arguments_dict = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args , key ):
                arguments_dict.update({key: value} )

        model_bin_file_path = os.path.join(current_output_dir , "best-checkpoint" , MODEL_BIN_FILE )
        if os.path.exists(model_bin_file_path ):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , model_bin_file_path , iteration , )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , iteration )
            finetune(**arguments_dict )
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path )
            logger.info("Self-training job completed: iteration: %d, stage: 1." , iteration )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir , "best-checkpoint" )
            current_output_dir = os.path.join(current_data_dir , "stage-2" )
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir , "best-checkpoint" , MODEL_BIN_FILE )
            if os.path.exists(model_bin_file_path ):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , model_bin_file_path , iteration , )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , iteration )
                finetune(**arguments_dict )
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path )
                logger.info("Self-training job completed: iteration: %d, stage: 2." , iteration )
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1 )

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir , "best-checkpoint" ) )
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir , "eval_results_best-checkpoint.json" )
        test_results_file = os.path.join(current_output_dir , "test_results_best-checkpoint.json" )
        assert os.path.exists(eval_results_file )

        with open(eval_results_file , "r" ) as f:
            eval_result = float(json.load(f )[args.eval_metric] )
        infer_output_file = os.path.join(current_output_dir , "infer_output_best-checkpoint.csv" )
        assert os.path.exists(infer_output_file )

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["""data"""]
        infer_output = load_dataset("csv" , data_files={"data": infer_output_file} )["""data"""]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir , exist_ok=True )
            shutil.copy(eval_results_file , os.path.join(output_dir , f'''eval_results_iter-{iteration}.json''' ) )
            if os.path.exists(test_results_file ):
                shutil.copy(test_results_file , os.path.join(output_dir , f'''test_results_iter-{iteration}.json''' ) )
            create_pseudo_labeled_data(args , infer_input , infer_output , eval_result , id2label , next_data_dir )
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir , f'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1 )

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d" , best_iteration )
        logger.info("Best evaluation result: %s = %f" , args.eval_metric , best_eval_result )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(output_dir , "eval_results_best-iteration.json" ) , )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
        logger.info("Best evaluation result: %s = %f" , args.eval_metric , eval_result )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(output_dir , "eval_results_best-iteration.json" ) , )
| 712
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config( model_name ):
    '''simple docstring'''
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 1_28
    elif "large" in model_name:
        embed_dim = 1_92
    elif "xlarge" in model_name:
        embed_dim = 2_56
    elif "huge" in model_name:
        embed_dim = 3_52

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )

    return config
def rename_key( name ):
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.norm" )
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers" , "encoder.stages" )
    if "downsample.proj" in name:
        name = name.replace("downsample.proj" , "downsample.projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f" , "modulation.projection_in" )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h" , "modulation.projection_context" )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj" , "modulation.projection_out" )

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head" , "classifier" )
    else:
        name = "focalnet." + name

    return name
def convert_focalnet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    # fmt: off
    model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: " , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val

    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()

    # load state dict
    model.load_state_dict(state_dict )

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True , size={"shortest_edge": 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=2_24 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors="pt" )

    image_transforms = transforms.Compose(
        [
            transforms.Resize(2_56 ),
            transforms.CenterCrop(2_24 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )

    original_pixel_values = image_transforms(image ).unsqueeze(0 )

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1e-4 )

    outputs = model(**inputs )

    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print("Predicted class:" , model.config.id2label[predicted_class_idx] )

    print("First values of logits:" , outputs.logits[0, :3] )
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
    assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print(f'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(f'''{model_name}''' )
        processor.push_to_hub(f'''{model_name}''' )
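# Hedged usage sketch (added; the script name is hypothetical, the flags come
# from the argparse definition below):
#   python convert_focalnet_checkpoint.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub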
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 39
| 0
|
"""simple docstring"""
def manhattan_distance(point_a , point_b ):
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )

    return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )


def _validate_point(point ):
    if point:
        if isinstance(point , list ):
            for item in point:
                if not isinstance(item , (int, float) ):
                    msg = (
                        """Expected a list of numbers as input, found """
                        f"""{type(item ).__name__}"""
                    )
                    raise TypeError(msg )
        else:
            msg = f"""Expected a list of numbers as input, found {type(point ).__name__}"""
            raise TypeError(msg )
    else:
        raise ValueError("""Missing an input""" )


def manhattan_distance_one_liner(point_a , point_b ):
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )

    return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
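# Quick check (added sketch for the helpers above):
#   manhattan_distance([1, 1], [2, 2])           -> 2.0
#   manhattan_distance_one_liner([1, 4], [5, 5]) -> 5.0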
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102
|
def ugly_numbers( n : int ) -> int:
    ugly_nums = [1]

    i2 , i3 , i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1 , n ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
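# Added note: the sequence this walks starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
#   ugly_numbers(10) -> 12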
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''')
| 613
| 0
|
def matching_min_vertex_cover( graph : dict ) -> set:
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices


def get_edges( graph : dict ) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 594
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler( SchedulerMixin , ConfigMixin ):
    """simple docstring"""

    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1e-3 ):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps( self , num_inference_steps , device = None ):
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )

    def step_pred( self , score , x , t , generator=None ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps )

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise

        return x, x_mean

    def __len__( self ):
        return self.config.num_train_timesteps
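# Hedged usage sketch (added; `score`, `x` and `t` are illustrative tensors,
# not defined in this file):
#   scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
#   scheduler.set_timesteps(num_inference_steps=1000)
#   x, x_mean = scheduler.step_pred(score, x, t, generator=None)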
| 594
| 1
|
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( ModelMixin ):
    def __init__( self : Any , controlnets : int ):
        """simple docstring"""
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def forward( self , sample , timestep , encoder_hidden_states , controlnet_cond , conditioning_scale , class_labels = None , timestep_cond = None , attention_mask = None , cross_attention_kwargs = None , guess_mode = False , return_dict = True , ):
        """simple docstring"""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples , mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )

            # merge samples
            if i == 0:
                down_block_res_samples , mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self , save_directory , is_main_process = True , save_function = None , safe_serialization = False , variant = None , ):
        """simple docstring"""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )

            idx += 1
            model_path_to_save = model_path_to_save + f"""_{idx}"""
@classmethod
    def from_pretrained( cls , pretrained_model_path , **kwargs ):
        """simple docstring"""
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )

            idx += 1
            model_path_to_load = pretrained_model_path + f"""_{idx}"""

        logger.info(f"""{len(controlnets )} controlnets loaded from {pretrained_model_path}.""" )

        if len(controlnets ) == 0:
            raise ValueError(
                f"""No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + "_0"}.""" )

        return cls(controlnets )
| 256
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase ( unittest.TestCase , ToolTesterMixin ):
    def setUp(self ) -> Tuple:
        self.tool = load_tool("""text-to-speech""" )
        self.tool.setup()

    def test_exact_match_arg(self ) -> Dict:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool("""hey""" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )

    def test_exact_match_kwarg(self ) -> Dict:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool("""hey""" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 635
| 0
|
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"
def update_custom_js( version ):
    '''simple docstring'''
    with open(CUSTOM_JS_FILE , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith('''const stableVersion =''' ):
        index += 1
    lines[index] = F'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith('''const versionMapping = {''' ):
        index += 1

    # We go until the end
    while not lines[index].startswith('''}''' ):
        index += 1

    # We add the new version at the end
    lines[index - 1] += F'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
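# Illustrative note (added): update_custom_js assumes custom.js contains lines
# shaped like
#   const stableVersion = "v4.30.0"
#   const versionMapping = {
#       ...
#   }
# and appends the new entry on the line just before the closing "}".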
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
| 362
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType( Protocol ):
    """simple docstring"""

    def process( self , sample : float ) -> float:
        """simple docstring"""
        return 0.0
def get_bounds( fft_results : np.ndarray , samplerate : int ) -> tuple[int | float, int | float]:
    '''simple docstring'''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response( filter_type : FilterType , samplerate : int ) -> None:
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )

    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('''Gain (dB)''' )

    plt.plot(fft_db )
    plt.show()
def show_phase_response( filter_type : FilterType , samplerate : int ) -> None:
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs ) )

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )

    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel('''Phase shift (Radians)''' )
    plt.plot(np.unwrap(fft_out , -2 * pi ) )
    plt.show()
| 362
| 1
|
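# Minimal singly-linked-list node assumed by the three checks below. This class
# is an added sketch for self-containment; it is not part of the original
# snippet, which only expects nodes exposing `.val` and `.next`.
class ListNode:
    def __init__(self , val=0 , next=None ):
        self.val = val
        self.next = next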
def __UpperCAmelCase ( __a : Union[str, Any] ) -> int:
"""simple docstring"""
if not head:
return True
# split the list to two parts
_a , _a : Optional[Any] = head.next, head
while fast and fast.next:
_a : List[Any] = fast.next.next
_a : List[str] = slow.next
_a : Any = slow.next
_a : Optional[Any] = None # Don't forget here! But forget still works!
# reverse the second part
_a : Union[str, Any] = None
while second:
_a : Union[str, Any] = second.next
_a : List[str] = node
_a : Optional[Any] = second
_a : List[str] = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
_a : Tuple = node.next
_a : Any = head.next
return True
def __UpperCAmelCase ( __a : Tuple ) -> List[Any]:
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
_a : Dict = head
while fast and fast.next:
_a , _a : List[Any] = fast.next.next, slow.next
# 2. Push the second half into the stack
_a : Optional[int] = [slow.val]
while slow.next:
_a : Union[str, Any] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
_a : List[Any] = cur.next
return True
def __UpperCAmelCase ( __a : str ) -> Optional[int]:
"""simple docstring"""
if not head or not head.next:
return True
_a : Tuple = {}
_a : List[Any] = 0
while head:
if head.val in d:
d[head.val].append(__a )
else:
_a : Dict = [pos]
_a : Dict = head.next
pos += 1
_a : Any = pos - 1
_a : List[str] = 0
for v in d.values():
if len(__a ) % 2 != 0:
middle += 1
else:
_a : Tuple = 0
for i in range(0 ,len(__a ) ):
if v[i] + v[len(__a ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
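# A self-contained reference version of the reverse-second-half approach above,
# with an explicit node class (the snippets assume one is defined elsewhere).
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def is_palindrome_reference(head):
    if not head or not head.next:
        return True
    # find the middle with slow/fast pointers
    slow = fast = head
    while fast and fast.next:
        slow, fast = slow.next, fast.next.next
    # reverse the second half in place
    prev = None
    while slow:
        slow.next, prev, slow = prev, slow, slow.next
    # walk both halves; the reversed half has the same length or one node fewer
    left, right = head, prev
    while right:
        if left.val != right.val:
            return False
        left, right = left.next, right.next
    return True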
| 14
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = MgpstrTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : List[Any] = False
def __lowercase ( self ) -> Any:
super().setUp()
# fmt: off
_a : Tuple = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_a : Optional[int] = dict(zip(_a , range(len(_a ) ) ) )
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
def __lowercase ( self , **_a ) -> Dict:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Tuple:
_a : List[str] = '''tester'''
_a : Optional[Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def __lowercase ( self ) -> Any:
pass
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a : int = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
_a : Tuple = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_a : Tuple = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def __lowercase ( self ) -> Tuple:
_a : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a , _a : int = self.get_input_output_texts(_a )
_a : List[str] = tokenizer.tokenize(_a )
_a : Optional[int] = tokenizer.convert_tokens_to_ids(_a )
_a : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
_a : int = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _a )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def __lowercase ( self ) -> Optional[Any]:
pass
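# The tests above exercise a character-level vocabulary saved as JSON; a small
# illustrative sketch of the same setup (helper name and paths are hypothetical,
# not part of the transformers API):
import json

def build_char_vocab(tokens, path):
    vocab = {tok: i for i, tok in enumerate(tokens)}  # token -> integer id
    with open(path, "w", encoding="utf-8") as fp:
        json.dump(vocab, fp)
    return vocab

# vocab = build_char_vocab(["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz"), "vocab.json")
# [vocab[c] for c in "tester"]  # character-by-character encoding, as in the tests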
| 14
| 1
|
'''simple docstring'''
from __future__ import annotations
class SCREAMING_SNAKE_CASE:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ) -> None:
"""simple docstring"""
__lowercase = order
# a_{0} ... a_{k}
__lowercase = [1.0] + [0.0] * order
# b_{0} ... b_{k}
__lowercase = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
__lowercase = [0.0] * self.order
# y[n-1] ... y[n-k]
__lowercase = [0.0] * self.order
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> None:
"""simple docstring"""
if len(lowerCamelCase__ ) < self.order:
__lowercase = [1.0, *a_coeffs]
if len(lowerCamelCase__ ) != self.order + 1:
__lowercase = (
F'Expected a_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(lowerCamelCase__ )}'
)
raise ValueError(lowerCamelCase__ )
if len(lowerCamelCase__ ) != self.order + 1:
__lowercase = (
F'Expected b_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(lowerCamelCase__ )}'
)
raise ValueError(lowerCamelCase__ )
__lowercase = a_coeffs
__lowercase = b_coeffs
def snake_case__ ( self , lowerCamelCase__ ) -> float:
"""simple docstring"""
__lowercase = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
__lowercase = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
__lowercase = self.input_history[:-1]
__lowercase = self.output_history[:-1]
__lowercase = sample
__lowercase = result
return result
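# The process() method above evaluates the standard IIR difference equation
#   a[0] * y[n] = b[0] * x[n] + b[1] * x[n-1] + ... - a[1] * y[n-1] - ...
# A minimal standalone sketch of one step of that recurrence (names illustrative):
def iir_step(sample, b, a, x_hist, y_hist):
    acc = b[0] * sample
    for i in range(1, len(a)):
        acc += b[i] * x_hist[i - 1] - a[i] * y_hist[i - 1]
    result = acc / a[0]
    x_hist.insert(0, sample); x_hist.pop()  # shift input history
    y_hist.insert(0, result); y_hist.pop()  # shift output history
    return result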
| 718
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case__ ( self ) -> Any:
"""simple docstring"""
__lowercase = 1
__lowercase = 3
__lowercase = (32, 32)
__lowercase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase__ )
return image
@property
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = UNet2DConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowerCamelCase__ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
return CLIPTextModel(lowerCamelCase__ )
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
__lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowercase = self.dummy_cond_unet_upscale
__lowercase = DDPMScheduler()
__lowercase = DDIMScheduler(prediction_type="""v_prediction""" )
__lowercase = self.dummy_vae
__lowercase = self.dummy_text_encoder
__lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowercase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowercase = Image.fromarray(np.uint8(lowerCamelCase__ ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__lowercase = StableDiffusionUpscalePipeline(
unet=lowerCamelCase__ , low_res_scheduler=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , max_noise_level=350 , )
__lowercase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowercase = sd_pipe(
[prompt] , image=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images
__lowercase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowercase = sd_pipe(
[prompt] , image=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCamelCase__ , )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
__lowercase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__lowercase = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case__ ( self ) -> Any:
"""simple docstring"""
__lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowercase = self.dummy_cond_unet_upscale
__lowercase = DDPMScheduler()
__lowercase = DDIMScheduler(prediction_type="""v_prediction""" )
__lowercase = self.dummy_vae
__lowercase = self.dummy_text_encoder
__lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowercase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowercase = Image.fromarray(np.uint8(lowerCamelCase__ ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__lowercase = StableDiffusionUpscalePipeline(
unet=lowerCamelCase__ , low_res_scheduler=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , max_noise_level=350 , )
__lowercase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images
assert image.shape[0] == 2
__lowercase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowercase = sd_pipe(
[prompt] , image=lowerCamelCase__ , generator=lowerCamelCase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
__lowercase = self.dummy_cond_unet_upscale
__lowercase = DDPMScheduler()
__lowercase = DDIMScheduler(prediction_type="""v_prediction""" )
__lowercase = self.dummy_vae
__lowercase = self.dummy_text_encoder
__lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowercase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowercase = Image.fromarray(np.uint8(lowerCamelCase__ ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__lowercase = unet.half()
__lowercase = text_encoder.half()
# make sure here that pndm scheduler skips prk
__lowercase = StableDiffusionUpscalePipeline(
unet=lowerCamelCase__ , low_res_scheduler=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , max_noise_level=350 , )
__lowercase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = torch.manual_seed(0 )
__lowercase = sd_pipe(
[prompt] , image=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=2 , output_type="""np""" , ).images
__lowercase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
__lowercase = """stabilityai/stable-diffusion-x4-upscaler"""
__lowercase = StableDiffusionUpscalePipeline.from_pretrained(lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
__lowercase = """a cat sitting on a park bench"""
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def snake_case__ ( self ) -> List[str]:
"""simple docstring"""
__lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
__lowercase = """stabilityai/stable-diffusion-x4-upscaler"""
__lowercase = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase__ , torch_dtype=torch.float16 , )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
__lowercase = """a cat sitting on a park bench"""
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
__lowercase = """stabilityai/stable-diffusion-x4-upscaler"""
__lowercase = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase__ , torch_dtype=torch.float16 , )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowercase = """a cat sitting on a park bench"""
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=5 , output_type="""np""" , )
__lowercase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
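# Condensed usage sketch of the pipeline exercised above, reusing the model id,
# image URL, and prompt from the slow tests. This assumes a CUDA device and
# network access; device/memory handling is simplified relative to the tests.
import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

def upscale_demo() -> None:
    low_res = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/sd2-upscale/low_res_cat.png"
    )
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    ).to("cuda")
    # the x4 upscaler returns an image four times the input resolution
    image = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]
    image.save("upscaled_cat.png")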
| 163
| 0
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__A : List[str] = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : str , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ):
warnings.warn(
"The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DPTImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
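# The class above is a thin deprecation shim; the general pattern, standalone
# (OldProcessor/NewProcessor are placeholder names, not real transformers classes):
import warnings

class NewProcessor:
    pass

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        # warn once at construction time, then defer entirely to the new class
        warnings.warn(
            "OldProcessor is deprecated and will be removed; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)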
| 16
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : Any = logging.get_logger(__name__)
__A : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__A : Optional[Any] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__A : Union[str, Any] = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __a ( ):
SCREAMING_SNAKE_CASE = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE = bs[:]
SCREAMING_SNAKE_CASE = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
def __a ( A__ : List[Any] ):
SCREAMING_SNAKE_CASE = set()
SCREAMING_SNAKE_CASE = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE = char
return pairs
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Any=False , **__lowerCamelCase : Optional[Any] , ):
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE = bytes_to_unicode()
SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _snake_case ( self : str ):
return len(self.encoder )
def _snake_case ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : Dict , __lowerCamelCase : List[Any] ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = word
return word
def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = []
for token in re.findall(self.pat , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : Tuple , __lowerCamelCase : Dict ):
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Any , __lowerCamelCase : Optional[int] ):
return self.decoder.get(__lowerCamelCase )
def _snake_case ( self : Optional[int] , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
SCREAMING_SNAKE_CASE = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Any=False , **__lowerCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE = " " + text
return (text, kwargs)
def _snake_case ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def _snake_case ( self : int , __lowerCamelCase : "Conversation" ):
SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses already contain the space prefix.
inputs.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids
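# A compact sketch of the merge loop inside bpe() above: repeatedly fuse the
# highest-ranked adjacent symbol pair until no known pair remains.
def bpe_merge(word: tuple, ranks: dict) -> tuple:
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        candidates = [pair for pair in pairs if pair in ranks]
        if not candidates:
            break
        first, second = min(candidates, key=ranks.get)  # lowest rank = earliest-learned merge
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word

# bpe_merge(("h", "e", "l", "l", "o"), {("l", "l"): 0}) -> ("h", "e", "ll", "o")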
| 16
| 1
|
import qiskit
def __UpperCamelCase ( a, a) ->qiskit.result.counts.Counts:
lowerCamelCase__ = qiskit.Aer.get_backend("aer_simulator")
# Create a Quantum Circuit acting on the q register
lowerCamelCase__ = qiskit.QuantumCircuit(a, a)
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0)
circuit.x(1)
# Map the quantum measurement to the classical bits
circuit.measure([0, 1], [0, 1])
# Execute the circuit on the qasm simulator
lowerCamelCase__ = qiskit.execute(a, a, shots=1000)
# Return the histogram data of the results of the experiment.
return job.result().get_counts(a)
if __name__ == "__main__":
A_ = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 710
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A_ = logging.get_logger(__name__)
A_ = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
A__ = "longformer"
def __init__( self , _lowerCAmelCase = 512 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 30_522 , _lowerCAmelCase = 768 , _lowerCAmelCase = 12 , _lowerCAmelCase = 12 , _lowerCAmelCase = 3072 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 512 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowerCamelCase__ = attention_window
lowerCamelCase__ = sep_token_id
lowerCamelCase__ = bos_token_id
lowerCamelCase__ = eos_token_id
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_act
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = onnx_export
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = True
@property
def __magic_name__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def __magic_name__ ( self ):
lowerCamelCase__ = super().outputs
if self.task == "default":
lowerCamelCase__ = {0: "batch"}
return outputs
@property
def __magic_name__ ( self ):
return 1E-4
@property
def __magic_name__ ( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def __magic_name__ ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ):
lowerCamelCase__ = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
# for some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowerCamelCase__ = torch.zeros_like(inputs["input_ids"] )
# make every second token global
lowerCamelCase__ = 1
return inputs
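# The dummy-input hook above marks every second token as global; the same
# pattern standalone (shapes illustrative):
import torch

def make_global_attention_mask(input_ids: torch.Tensor) -> torch.Tensor:
    mask = torch.zeros_like(input_ids)
    mask[:, ::2] = 1  # every second token attends globally
    return mask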
| 360
| 0
|
'''simple docstring'''
lowerCamelCase = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
lowerCamelCase = frozenset(["""prompt""", """negative_prompt"""])
lowerCamelCase = frozenset([])
lowerCamelCase = frozenset(["""image"""])
lowerCamelCase = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowerCamelCase = frozenset(["""image"""])
lowerCamelCase = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
lowerCamelCase = frozenset(["""prompt""", """image""", """negative_prompt"""])
lowerCamelCase = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
lowerCamelCase = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
lowerCamelCase = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowerCamelCase = frozenset(["""image""", """mask_image"""])
lowerCamelCase = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowerCamelCase = frozenset(["""example_image""", """image""", """mask_image"""])
lowerCamelCase = frozenset(["""class_labels"""])
lowerCamelCase = frozenset(["""class_labels"""])
lowerCamelCase = frozenset(["""batch_size"""])
lowerCamelCase = frozenset([])
lowerCamelCase = frozenset(["""batch_size"""])
lowerCamelCase = frozenset([])
lowerCamelCase = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
lowerCamelCase = frozenset(["""prompt""", """negative_prompt"""])
lowerCamelCase = frozenset(["""input_tokens"""])
lowerCamelCase = frozenset(["""input_tokens"""])
| 474
|
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = """conditional_detr"""
lowerCAmelCase__ = ["""past_key_values"""]
lowerCAmelCase__ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Optional[Any] , _lowerCAmelCase : int=True , _lowerCAmelCase : Any=None , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Union[str, Any]=3_0_0 , _lowerCAmelCase : Tuple=6 , _lowerCAmelCase : Dict=2_0_4_8 , _lowerCAmelCase : Union[str, Any]=8 , _lowerCAmelCase : Dict=6 , _lowerCAmelCase : Optional[int]=2_0_4_8 , _lowerCAmelCase : Any=8 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Optional[int]=0.0 , _lowerCAmelCase : int=True , _lowerCAmelCase : List[str]="relu" , _lowerCAmelCase : Optional[int]=2_5_6 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : Dict=0.0 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : str=1.0 , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : List[str]="sine" , _lowerCAmelCase : str="resnet50" , _lowerCAmelCase : Any=True , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : Dict=5 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : List[Any]=1 , _lowerCAmelCase : Dict=1 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : int=5 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Tuple=0.25 , **_lowerCAmelCase : Any , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
__lowercase =CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(_lowerCAmelCase , _lowerCAmelCase):
__lowercase =backbone_config.get('model_type')
__lowercase =CONFIG_MAPPING[backbone_model_type]
__lowercase =config_class.from_dict(_lowerCAmelCase)
__lowercase =use_timm_backbone
__lowercase =backbone_config
__lowercase =num_channels
__lowercase =num_queries
__lowercase =d_model
__lowercase =encoder_ffn_dim
__lowercase =encoder_layers
__lowercase =encoder_attention_heads
__lowercase =decoder_ffn_dim
__lowercase =decoder_layers
__lowercase =decoder_attention_heads
__lowercase =dropout
__lowercase =attention_dropout
__lowercase =activation_dropout
__lowercase =activation_function
__lowercase =init_std
__lowercase =init_xavier_std
__lowercase =encoder_layerdrop
__lowercase =decoder_layerdrop
__lowercase =encoder_layers
__lowercase =auxiliary_loss
__lowercase =position_embedding_type
__lowercase =backbone
__lowercase =use_pretrained_backbone
__lowercase =dilation
# Hungarian matcher
__lowercase =class_cost
__lowercase =bbox_cost
__lowercase =giou_cost
# Loss coefficients
__lowercase =mask_loss_coefficient
__lowercase =dice_loss_coefficient
__lowercase =cls_loss_coefficient
__lowercase =bbox_loss_coefficient
__lowercase =giou_loss_coefficient
__lowercase =focal_alpha
super().__init__(is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase)
@property
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return self.encoder_attention_heads
@property
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
return self.d_model
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
__lowercase =self.backbone_config.to_dict()
__lowercase =self.__class__.model_type
return output
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = version.parse("""1.11""" )
@property
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def __lowerCamelCase ( self : int):
'''simple docstring'''
return 1e-5
@property
def __lowerCamelCase ( self : str):
'''simple docstring'''
return 1_2
| 474
| 1
|
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
_A : Tuple ={'''UserAgent''': UserAgent().random}
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> dict:
lowerCamelCase__ : List[Any] = script.contents[0]
lowerCamelCase__ : str = json.loads(data[data.find("""{\"config\"""" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _lowercase :
def __init__( self: Optional[Any] , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : List[str] = F'''https://www.instagram.com/{username}/'''
lowerCamelCase__ : Dict = self.get_json()
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : Optional[Any] = requests.get(self.url , headers=UpperCamelCase__ ).text
lowerCamelCase__ : Tuple = BeautifulSoup(UpperCamelCase__ , """html.parser""" ).find_all("""script""" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self: Union[str, Any] ):
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self: List[str] ):
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def lowerCamelCase_ ( self: Tuple ):
return self.user_data["username"]
@property
def lowerCamelCase_ ( self: Any ):
return self.user_data["full_name"]
@property
def lowerCamelCase_ ( self: Tuple ):
return self.user_data["biography"]
@property
def lowerCamelCase_ ( self: str ):
return self.user_data["business_email"]
@property
def lowerCamelCase_ ( self: int ):
return self.user_data["external_url"]
@property
def lowerCamelCase_ ( self: Tuple ):
return self.user_data["edge_followed_by"]["count"]
@property
def lowerCamelCase_ ( self: List[Any] ):
return self.user_data["edge_follow"]["count"]
@property
def lowerCamelCase_ ( self: Tuple ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def lowerCamelCase_ ( self: Optional[int] ):
return self.user_data["profile_pic_url_hd"]
@property
def lowerCamelCase_ ( self: Optional[Any] ):
return self.user_data["is_verified"]
@property
def lowerCamelCase_ ( self: Tuple ):
return self.user_data["is_private"]
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = "github" ) -> None:
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
lowerCamelCase__ : int = InstagramUser(UpperCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , UpperCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_A : Dict =InstagramUser('''github''')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
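# The profile fields above all come from a JSON blob embedded in one of the
# page's <script> tags; the core extraction step in isolation (the slicing
# mirrors the parser above, which drops the trailing ';'):
import json

def extract_embedded_json(script_text: str) -> dict:
    start = script_text.find('{"config"')
    return json.loads(script_text[start:-1])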
| 713
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_A : int =get_tests_dir('''fixtures/test_sentencepiece.model''')
_A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''}
_A : int ='''>>zh<<'''
_A : Dict ='''Helsinki-NLP/'''
if is_torch_available():
_A : List[Any] ='''pt'''
elif is_tf_available():
_A : Optional[int] ='''tf'''
else:
_A : Dict ='''jax'''
@require_sentencepiece
class _lowercase ( _lowercase , unittest.TestCase ):
a = MarianTokenizer
a = False
a = True
def lowerCamelCase_ ( self: List[str] ):
super().setUp()
lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
lowerCamelCase__ : Optional[int] = Path(self.tmpdirname )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] ):
return (
"This is a test",
"This is a test",
)
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Any = """</s>"""
lowerCamelCase__ : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase__ ) , 9 )
def lowerCamelCase_ ( self: int ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0]
self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
lowerCamelCase__ : List[str] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCamelCase__ )
MarianTokenizer.from_pretrained(UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : Any = tok(
["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : str = self.get_tokenizer()
lowerCamelCase__ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCamelCase_ ( self: List[str] ):
# fmt: off
lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
lowerCamelCase__ : str = """Tämä on testi"""
lowerCamelCase__ : Any = """This is a test"""
lowerCamelCase__ : int = [76, 7, 2_047, 2]
lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2]
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 631
| 0
|
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ = " " ) -> list:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
for index, char in enumerate(lowerCAmelCase_ ):
if char == separator:
split_words.append(string[last_index:index] )
SCREAMING_SNAKE_CASE__ = index + 1
elif index + 1 == len(lowerCAmelCase_ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
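# Example calls for the splitter above, passed positionally, with the expected output:
#   __snake_case("apple banana cherry")  -> ['apple', 'banana', 'cherry']
#   __snake_case("a,b,c", ",")           -> ['a', 'b', 'c']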
| 100
|
'''simple docstring'''
def __lowerCamelCase ( __snake_case : int = 10, __snake_case : int = 22 ) -> int:
"""simple docstring"""
A__ : Any =range(1, __snake_case )
A__ : List[str] =range(1, __snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 215
| 0
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( lowercase_ ):
lowercase = ['input_features', 'is_longer']
def __init__( self ,__UpperCamelCase=64 ,__UpperCamelCase=4_8000 ,__UpperCamelCase=480 ,__UpperCamelCase=10 ,__UpperCamelCase=1024 ,__UpperCamelCase=0.0 ,__UpperCamelCase=False ,__UpperCamelCase = 0 ,__UpperCamelCase = 1_4000 ,__UpperCamelCase = None ,__UpperCamelCase = "fusion" ,__UpperCamelCase = "repeatpad" ,**__UpperCamelCase ,) -> Optional[int]:
'''simple docstring'''
super().__init__(
feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,**__UpperCamelCase ,)
lowercase_ : Union[str, Any] = top_db
lowercase_ : Any = truncation
lowercase_ : str = padding
lowercase_ : Optional[Any] = fft_window_size
lowercase_ : List[Any] = (fft_window_size >> 1) + 1
lowercase_ : Any = hop_length
lowercase_ : List[Any] = max_length_s
lowercase_ : Any = max_length_s * sampling_rate
lowercase_ : Optional[int] = sampling_rate
lowercase_ : List[Any] = frequency_min
lowercase_ : str = frequency_max
lowercase_ : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=__UpperCamelCase ,min_frequency=__UpperCamelCase ,max_frequency=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,norm=__UpperCamelCase ,mel_scale='htk' ,)
lowercase_ : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=__UpperCamelCase ,min_frequency=__UpperCamelCase ,max_frequency=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,norm='slaney' ,mel_scale='slaney' ,)
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
'''simple docstring'''
lowercase_ : int = copy.deepcopy(self.__dict__ )
lowercase_ : Any = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> np.ndarray:
'''simple docstring'''
lowercase_ : Union[str, Any] = spectrogram(
__UpperCamelCase ,window_function(self.fft_window_size ,'hann' ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=__UpperCamelCase ,log_mel='dB' ,)
return log_mel_spectrogram.T
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase_ : Optional[int] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase_ : List[str] = [0]
# randomly choose index for each part
lowercase_ : List[str] = np.random.choice(ranges[0] )
lowercase_ : Optional[Any] = np.random.choice(ranges[1] )
lowercase_ : Union[str, Any] = np.random.choice(ranges[2] )
lowercase_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
lowercase_ : str = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase_ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :]
lowercase_ : Tuple = torch.tensor(mel[None, None, :] )
lowercase_ : Optional[Any] = torch.nn.functional.interpolate(
__UpperCamelCase ,size=[chunk_frames, 64] ,mode='bilinear' ,align_corners=__UpperCamelCase )
lowercase_ : str = mel_shrink[0][0].numpy()
lowercase_ : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase_ : Any = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase_ : Any = len(__UpperCamelCase ) - max_length
lowercase_ : List[str] = np.random.randint(0 ,overflow + 1 )
lowercase_ : Any = waveform[idx : idx + max_length]
lowercase_ : Optional[int] = self._np_extract_fbank_features(__UpperCamelCase ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase_ : str = self._np_extract_fbank_features(__UpperCamelCase ,self.mel_filters )
lowercase_ : List[str] = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
lowercase_ : Dict = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase_ : int = np.stack([mel, mel, mel, mel] ,axis=0 )
lowercase_ : Optional[Any] = False
else:
lowercase_ : int = self._random_mel_fusion(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Tuple = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
lowercase_ : Dict = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase_ : Optional[int] = int(max_length / len(__UpperCamelCase ) )
lowercase_ : Any = np.stack(np.tile(__UpperCamelCase ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase_ : Union[str, Any] = int(max_length / len(__UpperCamelCase ) )
lowercase_ : int = np.stack(np.tile(__UpperCamelCase ,__UpperCamelCase ) )
lowercase_ : str = np.pad(__UpperCamelCase ,(0, max_length - waveform.shape[0]) ,mode='constant' ,constant_values=0 )
if truncation == "fusion":
lowercase_ : List[str] = self._np_extract_fbank_features(__UpperCamelCase ,self.mel_filters )
lowercase_ : List[str] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
lowercase_ : Any = self._np_extract_fbank_features(__UpperCamelCase ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> BatchFeature:
'''simple docstring'''
lowercase_ : Union[str, Any] = truncation if truncation is not None else self.truncation
lowercase_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowercase_ : Tuple = isinstance(__UpperCamelCase ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowercase_ : List[str] = is_batched_numpy or (
isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowercase_ : str = [np.asarray(__UpperCamelCase ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ):
lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa )
elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase_ : Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase_ : int = [np.asarray(__UpperCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase_ : List[str] = [
self._get_input_mel(__UpperCamelCase ,max_length if max_length else self.nb_max_samples ,__UpperCamelCase ,__UpperCamelCase )
for waveform in raw_speech
]
lowercase_ : int = []
lowercase_ : int = []
for mel, longer in padded_inputs:
input_mel.append(__UpperCamelCase )
is_longer.append(__UpperCamelCase )
if truncation == "fusion" and sum(__UpperCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase_ : int = np.random.randint(0 ,len(__UpperCamelCase ) )
lowercase_ : Tuple = True
if isinstance(input_mel[0] ,__UpperCamelCase ):
lowercase_ : Union[str, Any] = [np.asarray(__UpperCamelCase ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase_ : Any = [[longer] for longer in is_longer]
lowercase_ : List[Any] = {'input_features': input_mel, 'is_longer': is_longer}
lowercase_ : Union[str, Any] = BatchFeature(__UpperCamelCase )
if return_tensors is not None:
lowercase_ : Any = input_features.convert_to_tensors(__UpperCamelCase )
return input_features
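
# A minimal usage sketch for the extractor above. It assumes the class is
# exposed as `ClapFeatureExtractor` in `transformers` and that the checkpoint
# name below exists on the Hub -- both are assumptions, not stated in the
# fragment itself.
#
#     import numpy as np
#     from transformers import ClapFeatureExtractor
#
#     feature_extractor = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")  # hypothetical checkpoint
#     audio = np.random.randn(48_000)  # one second of fake mono audio at 48 kHz
#     inputs = feature_extractor(audio, sampling_rate=48_000, return_tensors="pt")
#     print(inputs["input_features"].shape)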
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def lowercase__( ):
lowercase_ : List[Any] = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
lowercase_ : int = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(__SCREAMING_SNAKE_CASE )
# Let's go
lowercase_ : str = parser.parse_args()
if not hasattr(__SCREAMING_SNAKE_CASE , 'func' ):
parser.print_help()
exit(1 )
# Run
lowercase_ : Optional[Any] = args.func(__SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
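
# A sketch of exercising the entry point above programmatically; the "env"
# subcommand name is an assumption based on EnvironmentCommand's role. From a
# shell, the equivalent invocation would be `diffusers-cli env`.
#
#     import sys
#     from unittest import mock
#
#     with mock.patch.object(sys, "argv", ["diffusers-cli", "env"]):
#         main()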
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
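
# A minimal usage sketch: this builder backs the packaged "parquet" loader in
# `datasets`, so end users typically reach it through `load_dataset`. The
# file name below is a placeholder.
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("parquet", data_files={"train": "my_data.parquet"}, split="train")
#     print(dataset.features)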
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)

    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
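
# An illustration of the marker this checker scans for; the object path and
# rename below are hypothetical, chosen only to show the `with A->B` syntax
# that `_re_copy_warning` and `_re_replace_pattern` parse:
#
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#     class MyBlock(nn.Module):
#         ...
#
# `is_copy_consistent` re-fetches the referenced object, applies the rename,
# and diffs the result against the body that follows the comment.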
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort `numbers` in place by swapping any later element that is smaller than an earlier one."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
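
# A quick worked example of the sort above; it makes O(n^2) comparisons,
# swapping eagerly whenever a later element is smaller than an earlier one:
#
#     >>> exchange_sort([5, 4, 3, 2, 1])
#     [1, 2, 3, 4, 5]
#     >>> exchange_sort([-1, 0, -2])
#     [-2, -1, 0]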
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
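
# A minimal usage sketch for the unconditional latent-diffusion pipeline above.
# The public export name and the checkpoint are assumptions based on the
# diffusers layout; any checkpoint providing a vqvae/unet/scheduler triple in
# this format should work.
#
#     from diffusers import LDMPipeline  # assumed public export
#
#     pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")  # hypothetical checkpoint
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]
#     image.save("sample.png")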
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    """Build the small example tree used by `main`."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal using a queue."""
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])

    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the nodes of the given level, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the nodes of the given level, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Zigzag (spiral) level-order traversal: alternate the direction on every level."""
    if root is None:
        return []

    output: list[Any] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
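
# A quick worked check of the zigzag traversal on the sample tree built by
# make_tree() (1 at the root, 2/3 below it, 4/5 under 2): the direction
# alternates on every level.
#
#     >>> zigzag(make_tree())
#     [[1], [3, 2], [4, 5]]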
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def a ( __snake_case : float, __snake_case : float ):
'''simple docstring'''
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
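
# A worked example of the formula above: with L = 10 H and C = 5 F,
# f = 1 / (2 * pi * sqrt(50)) ~= 0.0225 Hz.
#
#     >>> resonant_frequency(inductance=10, capacitance=5)
#     ('Resonant frequency', 0.022507907903927652)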
import argparse
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration


INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]


def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
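
# A sketch of driving the converter from Python instead of the CLI; the
# checkpoint path and output directory below are placeholders, and the
# script filename in the shell equivalent is hypothetical:
#
#     # shell: python convert_bigbird_pegasus_tf_to_pytorch.py --tf_ckpt_path <ckpt> --save_dir <dir>
#     convert_bigbird_pegasus_ckpt_to_pytorch(
#         "path/to/tf_checkpoint",  # placeholder: passed to tf.train.list_variables
#         "path/to/output_dir",     # placeholder: where save_pretrained writes the model
#         config_update={},
#     )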
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
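
# A sketch of the failure mode these placeholders provide (assuming note_seq
# is not installed): instantiating the dummy raises an informative error
# instead of an opaque ImportError at package-import time.
#
#     try:
#         MidiProcessor()
#     except ImportError as err:
#         print(err)  # tells the user which backend to install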
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the current worldwide COVID-19 statistics from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available


if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification


MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"


@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
from __future__ import annotations


def p_series(nth_term: int | str, power: int | str) -> list[str]:
    """Return the P-Series 1 + 1/2^p + 1/3^p + ... + 1/n^p as a list of strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
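
# A quick worked example: the first five terms of the P-Series with p = 2
# are 1, 1/4, 1/9, 1/16, 1/25.
#
#     >>> p_series(5, 2)
#     ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']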