| code (string, length 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, length 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
"""Tower of Hanoi."""


def move_tower(height, from_pole, to_pole, with_pole):
    # Move a tower of `height` disks from `from_pole` to `to_pole`,
    # using `with_pole` as the spare peg.
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
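A quick illustrative run (not part of the original file); for a two-disk tower the solver prints exactly three moves:

move_tower(2, "A", "B", "C")
# moving disk from A to C
# moving disk from A to B
# moving disk from C to B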
| 341
|
"""DeBERTa-v2 model configuration."""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: accept a "|"-separated string such as "p2c|c2p".
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
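A minimal usage sketch (not part of the original file) of the backwards-compatibility branch above: a "|"-separated string is normalized to a list.

config = DebertaV2Config(pos_att_type="p2c|c2p")
print(config.pos_att_type)         # ['p2c', 'c2p']
print(config.num_attention_heads)  # 24, the deberta-v2-xlarge default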
| 341
| 1
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowercase__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowercase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowercase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> List[str]:
"""simple docstring"""
lowerCAmelCase_ : int = len([g for position, g in enumerate(lowerCAmelCase__ ) if g == main_target[position]] )
return (item, float(lowerCAmelCase__ ))
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> str:
"""simple docstring"""
lowerCAmelCase_ : int = random.randint(0 , len(lowerCAmelCase__ ) - 1 )
lowerCAmelCase_ : str = parent_a[:random_slice] + parent_a[random_slice:]
lowerCAmelCase_ : Any = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase_ : Tuple = list(lowerCAmelCase__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowerCAmelCase_ : List[str] = random.choice(lowerCAmelCase__ )
return "".join(lowerCAmelCase__ )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> int:
"""simple docstring"""
lowerCAmelCase_ : Any = []
# Generate more children proportionally to the fitness score.
lowerCAmelCase_ : int = int(parent_a[1] * 100 ) + 1
lowerCAmelCase_ : Any = 10 if child_n >= 10 else child_n
for _ in range(lowerCAmelCase__ ):
lowerCAmelCase_ : Union[str, Any] = population_score[random.randint(0 , lowerCAmelCase__ )][0]
lowerCAmelCase_ : Any = crossover(parent_a[0] , lowerCAmelCase__ )
# Append new string to the population list.
pop.append(mutate(lowerCAmelCase__ , lowerCAmelCase__ ) )
pop.append(mutate(lowerCAmelCase__ , lowerCAmelCase__ ) )
return pop
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True ) -> Optional[Any]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
lowerCAmelCase_ : Any = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowerCAmelCase__ )
# Verify that the target contains no genes besides the ones inside genes variable.
lowerCAmelCase_ : str = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowerCAmelCase_ : Union[str, Any] = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowerCAmelCase__ )
# Generate random starting population.
lowerCAmelCase_ : Optional[Any] = []
for _ in range(lowerCAmelCase__ ):
population.append("".join([random.choice(lowerCAmelCase__ ) for i in range(len(lowerCAmelCase__ ) )] ) )
# Just some logs to know what the algorithms is doing.
lowerCAmelCase_ : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowerCAmelCase__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowerCAmelCase_ : Dict = [evaluate(lowerCAmelCase__ , lowerCAmelCase__ ) for item in population]
# Check if there is a matching evolution.
lowerCAmelCase_ : Tuple = sorted(lowerCAmelCase__ , key=lambda __UpperCamelCase : x[1] , reverse=lowerCAmelCase__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'''\nGeneration: {generation}'''
f'''\nTotal Population:{total_population}'''
f'''\nBest score: {population_score[0][1]}'''
f'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
lowerCAmelCase_ : int = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowerCAmelCase__ )
# Normalize population score to be between 0 and 1.
lowerCAmelCase_ : Any = [
(item, score / len(lowerCAmelCase__ )) for item, score in population_score
]
# This is selection
for i in range(lowerCAmelCase__ ):
population.extend(select(population_score[int(lowerCAmelCase__ )] , lowerCAmelCase__ , lowerCAmelCase__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(lowerCAmelCase__ ) > N_POPULATION:
break
if __name__ == "__main__":
lowercase__ = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
lowercase__ = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\"""
)
lowercase__ , lowercase__ , lowercase__ = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
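A tiny illustrative round with the three primitives above (not part of the original file; crossover and mutate are random, so the commented outputs are only examples):

demo_genes = list("abcde")
print(evaluate("abcde", "abcde"))   # ('abcde', 5.0): every position matches
print(crossover("aaaaa", "bbbbb"))  # e.g. ('aabbb', 'bbaaa') if the slice lands at 2
print(mutate("aaaaa", demo_genes))  # one position replaced with probability 0.4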
| 352
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCamelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
a_ : str = """ssube/stable-diffusion-x4-upscaler-onnx"""
def lowerCamelCase ( self : Any , a_ : Dict=0 ):
lowerCAmelCase_ : Dict = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(a_ ) )
lowerCAmelCase_ : Tuple = torch.manual_seed(a_ )
lowerCAmelCase_ : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCamelCase ( self : Union[str, Any] ):
lowerCAmelCase_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Tuple = self.get_dummy_inputs()
lowerCAmelCase_ : List[Any] = pipe(**a_ ).images
lowerCAmelCase_ : str = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase_ : int = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCamelCase ( self : Optional[Any] ):
lowerCAmelCase_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowerCAmelCase_ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Optional[Any] = self.get_dummy_inputs()
lowerCAmelCase_ : Union[str, Any] = pipe(**a_ ).images
lowerCAmelCase_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase_ : str = np.array(
[0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self : Union[str, Any] ):
lowerCAmelCase_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowerCAmelCase_ : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Optional[int] = self.get_dummy_inputs()
lowerCAmelCase_ : Tuple = pipe(**a_ ).images
lowerCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase_ : str = np.array(
[0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowerCAmelCase_ : str = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Tuple = self.get_dummy_inputs()
lowerCAmelCase_ : List[Any] = pipe(**a_ ).images
lowerCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase_ : Optional[int] = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowerCAmelCase_ : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Tuple = self.get_dummy_inputs()
lowerCAmelCase_ : List[str] = pipe(**a_ ).images
lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase_ : Optional[Any] = np.array(
[0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCamelCase ( self : Optional[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : Tuple = ort.SessionOptions()
lowerCAmelCase_ : List[str] = False
return options
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowerCAmelCase_ : List[Any] = init_image.resize((1_28, 1_28) )
# using the PNDM scheduler by default
lowerCAmelCase_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Dict = "A fantasy landscape, trending on artstation"
lowerCAmelCase_ : Any = torch.manual_seed(0 )
lowerCAmelCase_ : Dict = pipe(
prompt=a_ , image=a_ , guidance_scale=7.5 , num_inference_steps=10 , generator=a_ , output_type="np" , )
lowerCAmelCase_ : Dict = output.images
lowerCAmelCase_ : Optional[int] = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase_ : Optional[int] = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCamelCase ( self : str ):
lowerCAmelCase_ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowerCAmelCase_ : Union[str, Any] = init_image.resize((1_28, 1_28) )
lowerCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" )
lowerCAmelCase_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Any = "A fantasy landscape, trending on artstation"
lowerCAmelCase_ : Optional[int] = torch.manual_seed(0 )
lowerCAmelCase_ : List[str] = pipe(
prompt=a_ , image=a_ , guidance_scale=7.5 , num_inference_steps=20 , generator=a_ , output_type="np" , )
lowerCAmelCase_ : List[str] = output.images
lowerCAmelCase_ : Any = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase_ : List[str] = np.array(
[0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 161
| 0
|
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
def __init__( self : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=1_3 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Any=False , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : List[Any]=9_9 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Any=3_2 , __lowerCAmelCase : List[Any]=5 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[str]=5_1_2 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[Any]="last" , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : str=0 , ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Any = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Tuple = use_input_lengths
_lowerCamelCase : Optional[int] = use_token_type_ids
_lowerCamelCase : Dict = use_labels
_lowerCamelCase : Optional[Any] = gelu_activation
_lowerCamelCase : List[Any] = sinusoidal_embeddings
_lowerCamelCase : List[Any] = causal
_lowerCamelCase : Any = asm
_lowerCamelCase : int = n_langs
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : str = n_special
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : int = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : Dict = max_position_embeddings
_lowerCamelCase : Tuple = type_sequence_label_size
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : Optional[Any] = num_labels
_lowerCamelCase : Dict = num_choices
_lowerCamelCase : Tuple = summary_type
_lowerCamelCase : List[Any] = use_proj
_lowerCamelCase : Tuple = scope
_lowerCamelCase : Union[str, Any] = bos_token_id
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Dict = None
if self.use_input_lengths:
_lowerCamelCase : Tuple = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Optional[int] = None
if self.use_labels:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : Any = ids_tensor([self.batch_size] , 2 ).float()
_lowerCamelCase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : List[Any] = XLMModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , lengths=__lowerCAmelCase , langs=__lowerCAmelCase )
_lowerCamelCase : List[Any] = model(__lowerCAmelCase , langs=__lowerCAmelCase )
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : str = XLMWithLMHeadModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[str] = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , ):
"""simple docstring"""
_lowerCamelCase : Any = XLMForQuestionAnsweringSimple(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[str] = model(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
_lowerCamelCase : List[str] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = XLMForQuestionAnswering(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
_lowerCamelCase : Dict = model(
__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , p_mask=__lowerCAmelCase , )
_lowerCamelCase : Dict = model(
__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , )
((_lowerCamelCase) , ) : List[str] = result_with_labels.to_tuple()
_lowerCamelCase : str = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
((_lowerCamelCase) , ) : Optional[int] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , ):
"""simple docstring"""
_lowerCamelCase : List[str] = XLMForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : str = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , ):
"""simple docstring"""
_lowerCamelCase : str = self.num_labels
_lowerCamelCase : int = XLMForTokenClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : str = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.num_choices
_lowerCamelCase : List[str] = XLMForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Tuple = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) : Optional[Any] = config_and_inputs
_lowerCamelCase : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
snake_case__ : Tuple = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
snake_case__ : Tuple = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : int ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : str=False ):
"""simple docstring"""
_lowerCamelCase : int = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_lowerCamelCase : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
_lowerCamelCase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = XLMModelTester(self )
_lowerCamelCase : List[Any] = ConfigTester(self , config_class=__lowerCAmelCase , emb_dim=3_7 )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Tuple=1 ):
"""simple docstring"""
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(
[isinstance(__lowerCAmelCase , __lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(__lowerCAmelCase ) )
self.assertEqual(len(__lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__lowerCAmelCase ):
# adds PAD dummy token
_lowerCamelCase : Optional[Any] = min_length + idx + 1
_lowerCamelCase : Optional[Any] = min_length + idx + 1
_lowerCamelCase : Dict = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : str=False , __lowerCAmelCase : Tuple=1 ):
"""simple docstring"""
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(
[isinstance(__lowerCAmelCase , __lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(__lowerCAmelCase ) , )
self.assertEqual(len(__lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__lowerCAmelCase ):
# adds PAD dummy token
_lowerCamelCase : str = min_length + idx + 1
_lowerCamelCase : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__lowerCAmelCase ) , )
pass
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = XLMModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_torch
class __snake_case ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Any = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(__lowerCAmelCase )
_lowerCamelCase : Tuple = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=__lowerCAmelCase ) # the president
_lowerCamelCase : List[Any] = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , do_sample=__lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __lowerCAmelCase )
| 72
|
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of func (a string in the variable x) by Newton-Raphson, starting from x = a."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function (the value of pi)
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find the value of e
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
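As a quick sanity check (not in the original file): for f(x) = x**2 - 2 starting at x = 1.5, the first Newton step gives 1.5 - 0.25/3 ≈ 1.4167, and the loop then converges to the square root of 2:

print(newton_raphson("x**2 - 2", 1.5))  # ≈ 1.4142135623730951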
| 10
| 0
|
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 types of dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
| 366
|
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
| 47
| 0
|
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel

logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Print a 2D tensor, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores (gradients w.r.t. the head mask)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) from least to most important until the score drops below a threshold,
    following Michel et al. (http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked head weights and measure the effect on score and speed."""
    # Try pruning and test time speedup: pruning is like masking, but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
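An illustrative invocation (the script and file names are hypothetical; the flags mirror the argparse definitions above):

# python run_prune_gpt.py --model_name_or_path gpt2 --data_dir ./tokens.txt \
#     --output_dir ./pruned --try_masking --masking_threshold 0.9 --masking_amount 0.1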
| 338
|
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Count the tile sizes t <= t_limit that can form a hollow square lamina in
    at least one and at most n_limit distinct ways (Project Euler 174).
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F'''{solution() = }''')
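# A small brute-force cross-check of the counting idea above (illustrative, not
# part of the original solution): a lamina with outer width `outer` and hole
# width `hole` (same parity, hole <= outer - 2) uses outer**2 - hole**2 tiles,
# e.g. a 3x3 square with a 1x1 hole uses 9 - 1 = 8 tiles.
def laminae_count_for_tiles(t: int, limit: int = 100) -> int:
    return sum(
        1
        for outer in range(3, limit)
        for hole in range(2 - outer % 2, outer - 1, 2)
        if outer * outer - hole * hole == t
    )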
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = torch.ones([self.batch_size, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ ) > 0.5
).float()
UpperCAmelCase_: Optional[int] = (torch.rand((self.batch_size, self.num_labels), device=SCREAMING_SNAKE_CASE_ ) > 0.5).long()
UpperCAmelCase_: Union[str, Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __snake_case (self ) -> Any:
UpperCAmelCase_: Any = MaskaFormerConfig(
hidden_size=self.hidden_dim, )
UpperCAmelCase_: Any = self.num_queries
UpperCAmelCase_: Dict = self.num_labels
UpperCAmelCase_: Dict = [1, 1, 1, 1]
UpperCAmelCase_: int = self.num_channels
UpperCAmelCase_: Union[str, Any] = 64
UpperCAmelCase_: List[Any] = 128
UpperCAmelCase_: Optional[Any] = self.hidden_dim
UpperCAmelCase_: str = self.hidden_dim
UpperCAmelCase_: List[str] = self.hidden_dim
return config
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Dict = self.prepare_config_and_inputs()
UpperCAmelCase_: Any = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCAmelCase_: Union[str, Any] = output.encoder_hidden_states
UpperCAmelCase_: int = output.pixel_decoder_hidden_states
UpperCAmelCase_: Any = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), config.decoder_layers )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> Optional[Any]:
with torch.no_grad():
UpperCAmelCase_: Dict = MaskaFormerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: str = model(SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: Tuple = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
def comm_check_on_output(SCREAMING_SNAKE_CASE_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase_: Dict = model(pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = model(SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = model(
pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape, torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
def __snake_case (self ) -> List[Any]:
self.config_tester.run_common_tests()
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def __snake_case (self ) -> Dict:
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def __snake_case (self ) -> Optional[int]:
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def __snake_case (self ) -> List[str]:
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def __snake_case (self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def __snake_case (self ) -> List[str]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __snake_case (self ) -> Dict:
pass
def __snake_case (self ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_: Tuple = [*signature.parameters.keys()]
UpperCAmelCase_: str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case (self ) -> List[Any]:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCAmelCase_: Any = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: str = (self.model_tester.min_size,) * 2
UpperCAmelCase_: str = {
"""pixel_values""": torch.randn((2, 3, *size), device=SCREAMING_SNAKE_CASE_ ),
"""mask_labels""": torch.randn((2, 10, *size), device=SCREAMING_SNAKE_CASE_ ),
"""class_labels""": torch.zeros(2, 10, device=SCREAMING_SNAKE_CASE_ ).long(),
}
UpperCAmelCase_: Dict = self.model_tester.get_config()
UpperCAmelCase_: Optional[Any] = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: List[Any] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = model(**SCREAMING_SNAKE_CASE_, output_attentions=SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.attentions is not None )
def __snake_case (self ) -> Optional[int]:
if not self.model_tester.is_training:
return
UpperCAmelCase_: Union[str, Any] = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCAmelCase_: Optional[int] = model(SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Any = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_: Union[str, Any] = True
UpperCAmelCase_: str = True
UpperCAmelCase_: Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCAmelCase_: Union[str, Any] = model(SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase_: Union[str, Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCAmelCase_: Optional[int] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase_: Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE: float = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
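# A minimal invocation sketch (illustrative; the test-file path is an assumption
# based on the transformers repository layout, and running it requires network
# access to pull facebook/mask2former-swin-small-coco-instance):
#   pytest tests/models/mask2former/test_modeling_mask2former.py -k "Integration"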
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}


class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
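# A minimal usage sketch (illustrative, using the names from the reconstruction
# above): compose an AlignConfig from its two sub-configs.
#   text_config = AlignTextConfig()
#   vision_config = AlignVisionConfig()
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config)
#   config.to_dict()["model_type"]  # -> "align"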
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
"""simple docstring"""
from math import log2


def get_rightmost_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
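# Worked example (sketch, using the helper name from the fix above):
# 36 == 0b100100, so 36 & -36 == 0b100 == 4 and log2(4) == 2 -- the rightmost
# set bit sits at zero-indexed position 2.
#   get_rightmost_set_bit_position(36)  # -> 2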
"""simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no other queen in the same column, because if there
        # is it means there is a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # respective variables. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg: str, hint: str = None):
    require_version(deps[pkg], hint)
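# A minimal usage sketch (illustrative): verify an installed package against the
# version pin recorded in dependency_versions_table.py; raises if the installed
# version falls outside the pinned specifier.
#   dep_version_check("tqdm")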
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure timesteps and sigmas live on the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
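# A minimal usage sketch of the predictor-corrector loop this scheduler serves
# (illustrative; `model_output` comes from a score model not shown here, and the
# correct-then-predict ordering mirrors the upstream diffusers pipeline):
#   scheduler = ScoreSdeVeScheduler(num_train_timesteps=2000)
#   scheduler.set_timesteps(num_inference_steps=100)
#   scheduler.set_sigmas(num_inference_steps=100)
#   for t in scheduler.timesteps:
#       sample = scheduler.step_correct(model_output, sample).prev_sample
#       sample = scheduler.step_pred(model_output, t, sample).prev_sample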
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
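# A minimal invocation sketch (hypothetical script name and output path):
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variations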
def check_cycle(graph: dict) -> bool:
    # Keep track of all the visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from the recursion stack before the function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
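# A minimal usage sketch (hypothetical adjacency lists keyed by node):
#   check_cycle({0: [1], 1: [2], 2: [0]})  # -> True, edge 2 -> 0 closes a cycle
#   check_cycle({0: [1], 1: [2], 2: []})   # -> False, the graph is a DAG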
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
class MaxFenwickTree:
    """Fenwick tree that answers range-maximum queries with point updates."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # Recompute this node from its covered range [current_left_border, index]
                self.tree[index] = max(self.arr[index], self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
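# A minimal usage sketch (assuming the names from the reconstruction above):
#   ft = MaxFenwickTree(5)
#   ft.update(0, 4)
#   ft.update(3, 7)
#   ft.query(0, 4)  # -> 7, the maximum over indices [0, 4)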
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
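# A minimal invocation sketch (hypothetical paths): keep the first 128 lines of
# every file under ./full_data, writing the results to ./tiny_data. fire maps
# the positional CLI arguments onto minify's parameters:
#   python minify_dataset.py ./full_data ./tiny_data 128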
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
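# A minimal invocation sketch (hypothetical checkpoint/config paths; the script
# filename follows the transformers conversion-script naming convention):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./model.ckpt \
#       --transfo_xl_config_file ./config.json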
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
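# A minimal usage sketch (illustrative, using the reconstructed class name):
#   config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
#   config.num_frames  # -> 16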
from __future__ import annotations

from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Direct references to the bound methods keep the transfer loop tight
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")

        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
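# A minimal usage sketch (assuming the names from the reconstruction above):
#   q = QueueByTwoStacks([10, 20])
#   q.put(30)
#   q.get()  # -> 10: FIFO order is preserved despite the two-stack storage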
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
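# A minimal usage sketch (assuming the names from the reconstruction above):
#   roman_to_int("MCMXCIV")  # -> 1994
#   int_to_roman(1994)       # -> "MCMXCIV"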
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in the input strings, increment the count in the
    # corresponding bucket
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = None, ) -> str:
'''simple docstring'''
snake_case_ = {}
if train_file is not None:
snake_case_ = [train_file]
if eval_file is not None:
snake_case_ = [eval_file]
if test_file is not None:
snake_case_ = [test_file]
snake_case_ = datasets.load_dataset('''csv''', data_files=__UpperCAmelCase )
snake_case_ = list(ds[list(files.keys() )[0]].features.keys() )
snake_case_ = features_name.pop(__UpperCAmelCase )
snake_case_ = list(set(ds[list(files.keys() )[0]][label_name] ) )
snake_case_ = {label: i for i, label in enumerate(__UpperCAmelCase )}
snake_case_ = tokenizer.model_input_names
snake_case_ = {}
if len(__UpperCAmelCase ) == 1:
for k in files.keys():
snake_case_ = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]], truncation=__UpperCAmelCase, max_length=__UpperCAmelCase, padding='''max_length''' ), batched=__UpperCAmelCase, )
elif len(__UpperCAmelCase ) == 2:
for k in files.keys():
snake_case_ = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]), truncation=__UpperCAmelCase, max_length=__UpperCAmelCase, padding='''max_length''', ), batched=__UpperCAmelCase, )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
snake_case_ = {k: v for k, v in ex.items() if k in input_names}
snake_case_ = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
snake_case_ = {k: v for k, v in ex.items() if k in input_names}
snake_case_ = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
snake_case_ = {k: v for k, v in ex.items() if k in input_names}
snake_case_ = labelaid[ex[label_name]]
yield (d, label)
snake_case_ = (
tf.data.Dataset.from_generator(
__UpperCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
snake_case_ = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
snake_case_ = (
tf.data.Dataset.from_generator(
__UpperCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
snake_case_ = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
snake_case_ = (
tf.data.Dataset.from_generator(
__UpperCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
snake_case_ = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
logger = logging.getLogger(__name__)
@dataclass
class a :
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=_lowerCamelCase , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=_lowerCamelCase , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=_lowerCamelCase , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=_lowerCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class a :
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=_lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=_lowerCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=_lowerCamelCase , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=_lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def __magic_name__ ( ) -> Tuple:
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )
logger.info(
F"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
F"16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = get_tfds(
train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=__UpperCAmelCase, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(__UpperCAmelCase ), labelaid=__UpperCAmelCase, idalabel={id: label for label, id in labelaid.items()}, finetuning_task='''text-classification''', cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
snake_case_ = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_pt=bool('''.bin''' in model_args.model_name_or_path ), config=__UpperCAmelCase, cache_dir=model_args.cache_dir, )
def compute_metrics(__UpperCAmelCase ) -> Dict:
snake_case_ = np.argmax(p.predictions, axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
snake_case_ = TFTrainer(
model=__UpperCAmelCase, args=__UpperCAmelCase, train_dataset=__UpperCAmelCase, eval_dataset=__UpperCAmelCase, compute_metrics=__UpperCAmelCase, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case_ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
snake_case_ = trainer.evaluate()
snake_case_ = os.path.join(training_args.output_dir, '''eval_results.txt''' )
with open(__UpperCAmelCase, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
results.update(__UpperCAmelCase )
return results
if __name__ == "__main__":
main()
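# A minimal invocation sketch (illustrative; the script name and CSV paths are
# hypothetical, and the flag spellings are assumptions derived from the
# dataclass fields above -- the label column is selected by --label_column_id):
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file ./train.csv --dev_file ./dev.csv \
#       --label_column_id 0 --output_dir ./model --do_train --do_eval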
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
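# A minimal usage sketch (assuming the name from the reconstruction above):
#   snake_to_camel_case("hello_world")                   # -> "helloWorld"
#   snake_to_camel_case("hello_world", use_pascal=True)  # -> "HelloWorld"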
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 292
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
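# Usage sketch: constructing the config with no arguments reproduces the
# architecture of the facebook/xglm-564M checkpoint referenced in the archive
# map above; any field can be overridden by keyword.
#     config = XGLMConfig()               # 24 layers, d_model=1024, 16 heads
#     small = XGLMConfig(num_layers=12)   # a smaller, hypothetical variant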
| 292
| 1
|
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """
    Return the sum of the perimeters of every almost-equilateral triangle with
    integral side lengths and integral area whose perimeter does not exceed
    max_perimeter (Project Euler problem 94).
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
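# Sanity check for the recurrence above: the first two almost-equilateral
# triangles with integral sides and area are (5, 5, 6) and (17, 17, 16),
# with perimeters 16 and 50, so solution(50) returns 16 + 50 = 66.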
| 28
|
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """Count the reversible numbers of the given length (Project Euler 145)."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(f"{solution() = }")
| 87
| 0
|
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609_344,
    "knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277_777_778,
    "mph": 0.621_371_192,
    "knot": 0.539_956_803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
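# Usage sketch (results are rounded to 3 decimals by the function):
#     convert_speed(100, "km/h", "m/s")  -> 27.778
#     convert_speed(100, "m/s", "km/h")  -> 360.0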
| 223
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}


class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it,
        # so lstrip is set to True.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
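# A minimal usage sketch (assumes network access to the RUCAIBox/mvp
# checkpoint listed in the maps above):
#     tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     ids = tokenizer("Summarize: ...")["input_ids"]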
| 223
| 1
|
"""simple docstring"""
def is_automorphic_number(number: int) -> bool:
    """Return True if the square of the number ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
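# Examples: 5**2 = 25 and 76**2 = 5776 both end in the original number, so
# is_automorphic_number(5) and is_automorphic_number(76) are True, while
# is_automorphic_number(7) is False (49 does not end in 7).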
| 238
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)
        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample
        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
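# A hedged usage sketch (the checkpoint name is an assumption; any UNet1D-based
# audio diffusion checkpoint with a compatible scheduler should work):
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     audio = pipe(audio_length_in_s=4.0).audios[0]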
| 238
| 1
|
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
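# Example invocation (paths are hypothetical):
#   python parse_dpr_relevance_data.py --src_path biencoder-nq-dev.json \
#       --evaluation_set questions.txt --gold_data_path gold.tsv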
| 358
|
'''simple docstring'''
def actual_power(a: int, b: int):
    """Divide-and-conquer computation of a**b for non-negative integer b."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
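# actual_power halves the exponent at each level (recursion depth O(log b));
# the wrapper handles negative exponents, e.g.:
#     power(2, 10)  -> 1024
#     power(-2, -3) -> -0.125  (i.e. 1 / (-2)**3)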
| 280
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 79
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable
    unCLIP, used to scale image embeddings before noise is applied and to
    un-scale them after the noise is removed.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
| 47
| 0
|
"""simple docstring"""
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
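# Expected output for the graph above (BFS tree rooted at source vertex "G"):
#     G->C->A->B->D
#     G
# and a ValueError for "Foo", since it is not reachable from "G".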
| 341
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_swinv2 import (
        SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        Swinv2ForImageClassification,
        Swinv2ForMaskedImageModeling,
        Swinv2Model,
        Swinv2PreTrainedModel,
    )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 341
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 292
|
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib for this model
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib for this model
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 292
| 1
|
"""simple docstring"""
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
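# For the dimension array above this prints the classic CLRS result:
#     No. of Operation required: 15125
#     ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )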
| 361
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 2
| 0
|
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
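# e.g. gaussian(0) == 1 / sqrt(2 * pi) ~= 0.3989422804014327, the peak of the
# standard normal density; gaussian(2, mu=2) yields the same value.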
| 223
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with n digits (Project Euler 25)."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
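# e.g. solution(3) == 12, since F(12) = 144 is the first Fibonacci number
# with three digits.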
| 223
| 1
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # the weight is optional; repeated edges are ignored
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # the weight is optional; repeated edges are ignored
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 357
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 114
| 0
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Graph data structure backed by an adjacency list (a dictionary of lists)."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Connect source_vertex and destination_vertex, creating vertices as needed."""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
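
# Usage sketch (added for illustration):
if __name__ == "__main__":
    directed_graph = GraphAdjacencyList()
    directed_graph.add_edge(0, 1).add_edge(1, 2)
    print(directed_graph)  # {0: [1], 1: [2], 2: []}

    undirected_graph = GraphAdjacencyList(directed=False)
    undirected_graph.add_edge("a", "b")
    print(undirected_graph)  # {'a': ['b'], 'b': ['a']}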
|
from __future__ import annotations

from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Shortest path on a binary grid, where cells equal to 1 are passable."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
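
# Usage sketch (illustrative, not from the original file): cells equal to 1 are
# passable, so the path below must route around the zeros in the middle row.
#
#     grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
#     dist, path = dijkstra(grid, source=(0, 0), destination=(2, 0), allow_diagonal=False)
#     print(dist)   # 6.0
#     print(path)   # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]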
|
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    """Cache-aware helper that extracts archives into the datasets cache."""

    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
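
# Illustrative aside (not part of the original module): magic-number sniffing
# boils down to comparing a file's leading bytes against known signatures, e.g.:
#
#     with open("archive.bin", "rb") as f:   # hypothetical file
#         header = f.read(4)
#     is_gzip = header.startswith(b"\x1F\x8B")
#     is_zip = header.startswith(b"PK\x03\x04")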
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    #  Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
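
# Hedged usage sketch (not in the original module): the typical flow is to let
# Extractor sniff the archive format, then extract under that format.
#
#     input_path = "path/to/archive.tar.gz"       # hypothetical path
#     fmt = Extractor.infer_extractor_format(input_path)
#     if fmt:
#         Extractor.extract(input_path, "out_dir", extractor_format=fmt)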
|
'''simple docstring'''
from __future__ import annotations
class Matrix:
    """Matrix object generated from a 2D array where each element is an array
    representing a row."""

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
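
# Usage sketch (added for illustration) exercising the class above:
#
#     m = Matrix([[1, 2], [3, 4]])
#     print(m.determinant())   # -2
#     print(m.inverse().rows)  # [[-2, 1], [1, 0]] (int truncation happens in __mul__)
#     print((m * m).rows)      # [[7, 10], [15, 22]]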
|
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists. The source
        vertex has to be defined upon initialization."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return a '->'-separated path from the source vertex to target_vertex,
        raising ValueError when no path exists."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
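
# Performance note (illustrative): list.pop(0) is O(n). A deque-based variant of
# breath_first_search keeps each dequeue O(1); a sketch under the same API:
#
#     from collections import deque
#
#     def breath_first_search(self) -> None:
#         visited = {self.source_vertex}
#         self.parent[self.source_vertex] = None
#         queue = deque([self.source_vertex])
#         while queue:
#             vertex = queue.popleft()
#             for adjacent_vertex in self.graph[vertex]:
#                 if adjacent_vertex not in visited:
#                     visited.add(adjacent_vertex)
#                     self.parent[adjacent_vertex] = vertex
#                     queue.append(adjacent_vertex)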
|
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
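
# Usage sketch (illustrative, names above restored from context): the aliases
# describe flexible inputs in function signatures, e.g.:
#
#     def tokenize(batch: NestedDataStructureLike[str]) -> NestedDataStructureLike[str]:
#         ...  # accepts a str, a list of str, or a dict of str
#
#     def load(path: PathLike) -> bytes:
#         with open(path, "rb") as f:
#             return f.read()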
|
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float32)  # dtype obscured in the source; float32 assumed
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-TensorFlow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-TensorFlow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
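
# Illustrative aside (not from the original script): the recurring pattern above
# is "transpose 2-D TF kernels, keep 1-D tensors as-is" when building a PyTorch
# state dict. A minimal generic sketch of that step:
#
#     def tf_kernel_to_pt(vnp):
#         import numpy as np
#         import torch
#         # TF stores dense kernels as (in, out); PyTorch Linear expects (out, in)
#         return torch.tensor(np.ascontiguousarray(vnp.transpose([1, 0])))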
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
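
# Illustrative aside (not part of the test suite): the tests above disable the
# network by monkey-patching socket.socket inside a child interpreter.
# Stand-alone, the trick looks like this:
#
#     import socket
#
#     def offline_socket(*args, **kwargs):
#         raise RuntimeError("network access is disabled in this test")
#
#     socket.socket = offline_socket  # any connect attempt now raises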
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
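
# Usage sketch (illustrative; PokerHand comes from sol1 as imported above):
#
#     royal = PokerHand("TS JS QS KS AS")
#     pair = PokerHand("2H 2D 5C 8S KD")
#     print(royal.compare_with(pair))  # "Win"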
|
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowercase__ , lowercase__ , lowercase__ = False, False, False
@dataclass
class Audio:
    """Audio feature that stores either a path to an audio file or its raw bytes."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
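
# Illustrative aside (not from the datasets source): the encode path above
# serializes a raw array to in-memory WAV bytes with soundfile, e.g.:
#
#     import numpy as np
#     import soundfile as sf
#     from io import BytesIO
#
#     buffer = BytesIO()
#     tone = np.sin(2 * np.pi * 440 * np.arange(16000) / 16000)  # 1s of 440 Hz
#     sf.write(buffer, tone, 16000, format="wav")
#     wav_bytes = buffer.getvalue()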
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        VideoMAEForPreTraining,
        VideoMAEForVideoClassification,
        VideoMAEModel,
    )
    from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
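
# Sanity check for the masking arithmetic above, with this tester's defaults:
#   num_patches_per_frame = (image_size // patch_size) ** 2 = (10 // 2) ** 2 = 25
#   seq_length = (num_frames // tubelet_size) * 25 = (2 // 2) * 25 = 25
#   num_masks = int(mask_ratio * seq_length) = int(0.9 * 25) = 22
# so `bool_masked_pos` marks 22 of the 25 tokens of each video as masked.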
| 83
| 0
|
_snake_case = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_snake_case = [{"type": "code", "content": INSTALL_CONTENT}]
_snake_case = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
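
# Note: these constants configure the documentation-notebook build (this copy comes from
# the Korean docs; the comments above were translated from Korean). INSTALL_CONTENT becomes
# the first cell of each generated notebook, and black_avoid_patterns lists template
# placeholders that the code formatter must leave untouched.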
| 26
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 114
| 0
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
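
# Illustration (the summary line below is made up): for artifact["stats"] like
#   "=== 2 failed, 98 passed in 3942.21s ==="
# handle_test_results returns failed=2, success=98, time_spent="3942.21s".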
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures


class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        self.thread_ts = None

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
    @property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(_A )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=_A , )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 137
|
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 137
| 1
|
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph for running a Markov chain."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run the Markov chain for `steps` transitions from `start` and count node visits."""
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
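
# Illustrative usage (values chosen for this example):
#   transitions = [("a", "b", 1.0), ("b", "a", 1.0)]
#   visit_counts = get_transitions("a", transitions, steps=100)
#   # visit_counts is a Counter mapping each node to how often the walk landed on it;
#   # here each node always moves to the other, so the counts end up roughly equal.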
| 227
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights into the Transformers design.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
original_model.eval()
print("Done!" )
# update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
# read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)

    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
# assert values
if model_name == "blip2-flan-t5-xl":
a = torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=A )
assert torch.allclose(logits[0, :3, :3] , A , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
a = torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=A )
else:
# cast to same type
a = logits.dtype
assert torch.allclose(original_logits.to(A ) , A , atol=1e-2 )
print("Looks ok!" )
print("Generating a caption..." )
a = ""
a = tokenizer(A , return_tensors="pt" ).input_ids.to(A )
a = original_model.generate({"image": original_pixel_values} )
a = hf_model.generate(
A , A , do_sample=A , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("Original generation:" , A )
a = input_ids.shape[1]
a = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=A )
a = [text.strip() for text in output_text]
print("HF generation:" , A )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(A )
hf_model.save_pretrained(A )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
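
# Example invocation of this script (the output path is illustrative):
#   python <this_script>.py --model_name blip2-opt-2.7b --pytorch_dump_folder_path ./blip2-opt-2.7b-converted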
| 227
| 1
|
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
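
# Example invocation of this script (paths are illustrative):
#   python <this_script>.py \
#       --openai_checkpoint_folder_path ./openai-gpt-checkpoint \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch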
| 71
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
a = os.path.join(lowerCamelCase_ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 71
| 1
|
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def a_ ( __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =dct.pop(__snake_case )
lowerCamelCase_ =val
def a_ ( __snake_case : List[str] , __snake_case : Optional[int]=False ) -> Optional[int]:
"""simple docstring"""
if base_model:
lowerCamelCase_ =''''''
else:
lowerCamelCase_ ='''mobilevitv2.'''
lowerCamelCase_ =[]
for k in state_dict.keys():
if k[:8] == "encoder.":
lowerCamelCase_ =k[8:]
else:
lowerCamelCase_ =k
if ".block." in k:
lowerCamelCase_ =k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
lowerCamelCase_ =k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
lowerCamelCase_ =k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
lowerCamelCase_ =k_new.replace('''conv_1.''' , F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
lowerCamelCase_ =k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
lowerCamelCase_ =k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
lowerCamelCase_ =k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
lowerCamelCase_ =k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
lowerCamelCase_ =k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
lowerCamelCase_ =k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
lowerCamelCase_ =[0, 1]
elif i == 4:
lowerCamelCase_ =[0, 1, 2, 3]
elif i == 5:
lowerCamelCase_ =[0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
lowerCamelCase_ =k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
lowerCamelCase_ =k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
lowerCamelCase_ =k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
lowerCamelCase_ =k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
lowerCamelCase_ =k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
lowerCamelCase_ =k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
lowerCamelCase_ =k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
lowerCamelCase_ =k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
lowerCamelCase_ =k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
lowerCamelCase_ =k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
lowerCamelCase_ =k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
lowerCamelCase_ =k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
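# Illustrative trace (not from the original script): with base_model=False the
# rules above rewrite a checkpoint key such as
#   "layer_3.1.local_rep.0.conv.weight"
# into
#   "mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight"
# (".conv." -> ".convolution.", then the "layer_3.1.local_rep.0." prefix is mapped
# to the HF encoder-layer path with the "mobilevitv2." model prefix).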
def a_ ( __snake_case : Any ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =[]
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(__snake_case )
for k in keys_to_ignore:
state_dict.pop(__snake_case , __snake_case )
def a_ ( ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowerCamelCase_ =Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def a_ ( __snake_case : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ =get_mobilevitva_config(__snake_case , __snake_case )
# load original state_dict
lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
lowerCamelCase_ =MobileViTVaForSemanticSegmentation(__snake_case ).eval()
lowerCamelCase_ =False
else:
lowerCamelCase_ =MobileViTVaForImageClassification(__snake_case ).eval()
lowerCamelCase_ =False
    # remove and rename some keys of the loaded original model
lowerCamelCase_ =checkpoint
remove_unused_keys(__snake_case )
lowerCamelCase_ =create_rename_keys(__snake_case , base_model=__snake_case )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__snake_case , __snake_case , __snake_case )
# load modified state_dict
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase_ =MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCamelCase_ =image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCamelCase_ =model(**__snake_case )
# verify classification model
if task_name.startswith('''imagenet''' ):
lowerCamelCase_ =outputs.logits
lowerCamelCase_ =logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowerCamelCase_ =torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] )
assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
a_ : Dict = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
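# Example invocation (script name and paths are illustrative, not from the source;
# the flags match the argparse definitions above):
# python convert_mobilevitv2_to_pytorch.py \
#     --task imagenet1k_256 \
#     --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#     --orig_config_path ./mobilevitv2.yaml \
#     --pytorch_dump_folder_path ./mobilevitv2-1.0-hf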
| 75
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : List[Any] =StableDiffusionInstructPixaPixPipeline
lowercase : List[Any] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
lowercase : Optional[Any] =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase : Union[str, Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase : List[Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
lowerCamelCase_ =PNDMScheduler(skip_prk_steps=lowerCAmelCase )
torch.manual_seed(0 )
lowerCamelCase_ =AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
torch.manual_seed(0 )
lowerCamelCase_ =CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
lowerCamelCase_ =CLIPTextModel(lowerCAmelCase )
lowerCamelCase_ =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase_ ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase_ =Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' )
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCamelCase_ ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ =np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
lowerCamelCase_ ='''french fries'''
lowerCamelCase_ =sd_pipe(**lowerCAmelCase, negative_prompt=lowerCAmelCase )
lowerCamelCase_ =output.images
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ =np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
lowerCamelCase_ =[inputs['''prompt''']] * 2
lowerCamelCase_ =np.array(inputs['''image'''] ).astype(np.floataa ) / 2_5_5.0
lowerCamelCase_ =torch.from_numpy(lowerCAmelCase ).unsqueeze(0 ).to(lowerCAmelCase )
lowerCamelCase_ =image / 2 + 0.5
lowerCamelCase_ =image.permute(0, 3, 1, 2 )
lowerCamelCase_ =image.repeat(2, 1, 1, 1 )
lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowerCamelCase_ =np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='''scaled_linear''' )
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1]
lowerCamelCase_ =[round(lowerCAmelCase, 4 ) for x in image_slice.flatten().tolist()]
        print(''','''.join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ =np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =VaeImageProcessor(do_resize=lowerCAmelCase, do_normalize=lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='''pt''' ) )[0]
lowerCamelCase_ =components['''vae''']
lowerCamelCase_ =self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowerCamelCase_ =vae.encode(inputs[image_param] ).latent_dist.mode()
lowerCamelCase_ =pipe(**lowerCAmelCase )[0]
lowerCamelCase_ =np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase, 1e-4, '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
lowerCamelCase_ =load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
lowerCamelCase_ ={
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =self.get_inputs()
lowerCamelCase_ =pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase )
lowerCamelCase_ =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =self.get_inputs()
lowerCamelCase_ =pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase )
lowerCamelCase_ =DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =self.get_inputs()
lowerCamelCase_ =pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =0
def callback_fn(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) -> None:
lowerCamelCase_ =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase_ =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowerCamelCase_ =latents[0, -3:, -3:, -1]
lowerCamelCase_ =np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCamelCase_ =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowerCamelCase_ =latents[0, -3:, -3:, -1]
lowerCamelCase_ =np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCamelCase_ =False
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =self.get_inputs()
pipe(**lowerCAmelCase, callback=lowerCAmelCase, callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase__ ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ =self.get_inputs()
lowerCamelCase_ =pipe(**lowerCAmelCase )
lowerCamelCase_ =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ =inputs['''image'''].resize((504, 504) )
lowerCamelCase_ ='''timbrooks/instruct-pix2pix'''
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase, safety_checker=lowerCAmelCase, )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =pipe(**lowerCAmelCase )
lowerCamelCase_ =output.images[0]
lowerCamelCase_ =image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowerCamelCase_ =np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 75
| 1
|
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = False
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
UpperCamelCase = parser.parse_args()
UpperCamelCase = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
UpperCamelCase = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
UpperCamelCase = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
UpperCamelCase = reader.read()
UpperCamelCase = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
UpperCamelCase = UNetaDModel(**config)
else:
UpperCamelCase = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
UpperCamelCase = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
UpperCamelCase = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
UpperCamelCase = config[key]
del config[key]
UpperCamelCase = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
UpperCamelCase = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
UpperCamelCase = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
UpperCamelCase = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
UpperCamelCase = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
UpperCamelCase = param_value
UpperCamelCase = True
if not has_changed:
UpperCamelCase = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
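# Example invocation (script name and repo path are illustrative); note that the
# updated config / weights are written back under --repo_path:
# python rename_unet_keys.py --repo_path ./ddpm-cifar10-32 --dump_path ./ddpm-cifar10-32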
| 333
|
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : str):
lowercase__ : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase)
lowercase__ : List[str] = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase)
lowercase__ : List[str] = checkpoints.load_tax_checkpoint(_lowerCamelCase)
lowercase__ : Dict = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ : Any = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Dict = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global].")
# Encoder
for layer_index in range(config.num_layers):
        lowercase__ : str = f'''layers_{str(layer_index)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : int = flax_model.params["encoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
if split_mlp_wi:
lowercase__ : Tuple = tax_mlp_wi_a
lowercase__ : str = tax_mlp_wi_a
else:
lowercase__ : List[Any] = tax_mlp_wi
lowercase__ : str = tax_mlp_wo
lowercase__ : int = tax_mlp_layer_norm
lowercase__ : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
        lowercase__ : Dict = f'''layers_{str(layer_index)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
lowercase__ : List[str] = tax_mlp_wi_a
lowercase__ : List[Any] = tax_mlp_wi_a
else:
lowercase__ : Tuple = tax_mlp_wi
lowercase__ : Any = tax_mlp_wo
        lowercase__ : Tuple = tax_mlp_layer_norm
lowercase__ : int = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    lowercase__ : List[Any] = tax_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
    lowercase__ : Optional[Any] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_lowerCamelCase)
print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
UpperCamelCase = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
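# Example invocation (paths and config name are illustrative; the flags match the
# argparse definitions above):
# python convert_t5x_checkpoint_to_flax.py \
#     --t5x_checkpoint_path ./t5x_checkpoint \
#     --config_name google/long-t5-local-base \
#     --flax_dump_folder_path ./long-t5-flax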
| 333
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Tuple = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = "transfo-xl"
snake_case_ = ["mems"]
snake_case_ = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : int ,A : List[str]=26_77_35 ,A : Tuple=[2_00_00, 4_00_00, 20_00_00] ,A : int=10_24 ,A : Any=10_24 ,A : Dict=16 ,A : Union[str, Any]=64 ,A : Union[str, Any]=40_96 ,A : Union[str, Any]=4 ,A : Optional[Any]=False ,A : Any=18 ,A : Optional[int]=16_00 ,A : str=10_00 ,A : Optional[int]=True ,A : Dict=True ,A : Any=0 ,A : int=-1 ,A : str=True ,A : str=0.1 ,A : Dict=0.0 ,A : str=True ,A : Any="normal" ,A : List[str]=0.01 ,A : int=0.01 ,A : Optional[int]=0.02 ,A : int=1E-5 ,A : Optional[Any]=0 ,**A : Optional[int] ,):
__A = vocab_size
__A = []
self.cutoffs.extend(A )
if proj_share_all_but_first:
__A = [False] + [True] * len(self.cutoffs )
else:
__A = [False] + [False] * len(self.cutoffs )
__A = d_model
__A = d_embed
__A = d_head
__A = d_inner
__A = div_val
__A = pre_lnorm
__A = n_layer
__A = n_head
__A = mem_len
__A = same_length
__A = attn_type
__A = clamp_len
__A = sample_softmax
__A = adaptive
__A = dropout
__A = dropatt
__A = untie_r
__A = init
__A = init_range
__A = proj_init_std
__A = init_std
__A = layer_norm_epsilon
super().__init__(eos_token_id=A ,**A )
@property
def UpperCamelCase_ ( self : List[str] ):
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def UpperCamelCase_ ( self : Any ,A : Optional[Any] ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 15
|
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : int = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class lowercase__ ( lowercase ):
lowercase__ = """xlm-prophetnet"""
lowercase__ = ["""past_key_values"""]
lowercase__ = {
"""num_attention_heads""": """num_encoder_attention_heads""",
}
def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[Union[str, Callable]] = "gelu" ,lowerCamelCase__ : Optional[int] = 30522 ,lowerCamelCase__ : Optional[int] = 1024 ,lowerCamelCase__ : Optional[int] = 4096 ,lowerCamelCase__ : Optional[int] = 12 ,lowerCamelCase__ : Optional[int] = 16 ,lowerCamelCase__ : Optional[int] = 4096 ,lowerCamelCase__ : Optional[int] = 12 ,lowerCamelCase__ : Optional[int] = 16 ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[int] = 512 ,lowerCamelCase__ : Optional[float] = 0.0_2 ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[int] = 0 ,lowerCamelCase__ : Optional[int] = 2 ,lowerCamelCase__ : Optional[int] = 32 ,lowerCamelCase__ : Optional[int] = 128 ,lowerCamelCase__ : Optional[bool] = False ,lowerCamelCase__ : Optional[float] = 0.0 ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[int] = 0 ,lowerCamelCase__ : Optional[int] = 1 ,lowerCamelCase__ : Optional[int] = 2 ,**lowerCamelCase__ : Union[str, Any] ,):
'''simple docstring'''
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : str = encoder_ffn_dim
_UpperCamelCase : List[Any] = num_encoder_layers
_UpperCamelCase : Tuple = num_encoder_attention_heads
_UpperCamelCase : Optional[int] = decoder_ffn_dim
_UpperCamelCase : List[Any] = num_decoder_layers
_UpperCamelCase : List[Any] = num_decoder_attention_heads
_UpperCamelCase : Optional[Any] = max_position_embeddings
_UpperCamelCase : str = init_std # Normal(0, this parameter)
_UpperCamelCase : List[str] = activation_function
# parameters for xlmprophetnet
_UpperCamelCase : Tuple = ngram
_UpperCamelCase : Optional[Any] = num_buckets
_UpperCamelCase : Tuple = relative_max_distance
_UpperCamelCase : str = disable_ngram_loss
_UpperCamelCase : str = eps
# 3 Types of Dropout
_UpperCamelCase : Union[str, Any] = attention_dropout
_UpperCamelCase : str = activation_dropout
_UpperCamelCase : List[str] = dropout
_UpperCamelCase : Tuple = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,is_encoder_decoder=lowerCamelCase__ ,add_cross_attention=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
| 83
| 0
|
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_SCREAMING_SNAKE_CASE : Union[str, Any] = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_SCREAMING_SNAKE_CASE : Tuple = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
_SCREAMING_SNAKE_CASE : Optional[int] = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
def remove_articles(UpperCamelCase_ ):
snake_case = re.compile(r'''\b(a|an|the)\b''' ,re.UNICODE )
return re.sub(UpperCamelCase_ ,''' ''' ,UpperCamelCase_ )
def white_space_fix(UpperCamelCase_ ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase_ ):
snake_case = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase_ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase_ ) ) ) )
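# Illustrative behaviour (not part of the original metric):
# normalize_answer("The Cat, sat!") -> "cat sat"
# (lowercased, punctuation stripped, articles removed, whitespace collapsed)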
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
return int(normalize_answer(UpperCamelCase_ ) == normalize_answer(UpperCamelCase_ ) )
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = [any(compute_exact(UpperCamelCase_ ,UpperCamelCase_ ) for ref in refs ) for pred, refs in zip(UpperCamelCase_ ,UpperCamelCase_ )]
return (sum(UpperCamelCase_ ) / len(UpperCamelCase_ )) * 1_00
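# Illustrative behaviour (not part of the original metric):
# compute_em(predictions=["a cat"], references=[["A cat!"]]) -> 100.0
# since both sides normalize to "cat".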
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = [rgram for rgrams in rgramslist for rgram in rgrams]
snake_case = Counter(UpperCamelCase_ )
snake_case = Counter(UpperCamelCase_ )
snake_case = Counter()
for sgram, scount in sgramcounter.items():
snake_case = scount * numref
snake_case = Counter(UpperCamelCase_ )
snake_case = Counter()
for cgram, ccount in cgramcounter.items():
snake_case = ccount * numref
# KEEP
snake_case = sgramcounter_rep & cgramcounter_rep
snake_case = keepgramcounter_rep & rgramcounter
snake_case = sgramcounter_rep & rgramcounter
snake_case = 0
snake_case = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case = 1
snake_case = 1
if len(UpperCamelCase_ ) > 0:
snake_case = keeptmpscorea / len(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
snake_case = keeptmpscorea / sum(keepgramcounterall_rep.values() )
snake_case = 0
if keepscore_precision > 0 or keepscore_recall > 0:
snake_case = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
snake_case = sgramcounter_rep - cgramcounter_rep
snake_case = delgramcounter_rep - rgramcounter
snake_case = sgramcounter_rep - rgramcounter
snake_case = 0
snake_case = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case = 1
if len(UpperCamelCase_ ) > 0:
snake_case = deltmpscorea / len(UpperCamelCase_ )
# ADDITION
snake_case = set(UpperCamelCase_ ) - set(UpperCamelCase_ )
snake_case = set(UpperCamelCase_ ) & set(UpperCamelCase_ )
snake_case = set(UpperCamelCase_ ) - set(UpperCamelCase_ )
snake_case = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case = 1
snake_case = 1
if len(UpperCamelCase_ ) > 0:
snake_case = addtmpscore / len(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
snake_case = addtmpscore / len(UpperCamelCase_ )
snake_case = 0
if addscore_precision > 0 or addscore_recall > 0:
snake_case = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = len(UpperCamelCase_ )
snake_case = ssent.split(''' ''' )
snake_case = csent.split(''' ''' )
snake_case = []
snake_case = []
snake_case = []
snake_case = []
snake_case = []
snake_case = []
snake_case = []
snake_case = []
snake_case = []
snake_case = []
for rsent in rsents:
snake_case = rsent.split(''' ''' )
snake_case = []
snake_case = []
snake_case = []
ragramslist.append(UpperCamelCase_ )
for i in range(0 ,len(UpperCamelCase_ ) - 1 ):
if i < len(UpperCamelCase_ ) - 1:
snake_case = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 2:
snake_case = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 3:
snake_case = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(UpperCamelCase_ )
ragramslist.append(UpperCamelCase_ )
ragramslist.append(UpperCamelCase_ )
ragramslist.append(UpperCamelCase_ )
for i in range(0 ,len(UpperCamelCase_ ) - 1 ):
if i < len(UpperCamelCase_ ) - 1:
snake_case = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 2:
snake_case = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 3:
snake_case = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(UpperCamelCase_ )
for i in range(0 ,len(UpperCamelCase_ ) - 1 ):
if i < len(UpperCamelCase_ ) - 1:
snake_case = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 2:
snake_case = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 3:
snake_case = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(UpperCamelCase_ )
((snake_case) , (snake_case) , (snake_case)) = SARIngram(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
((snake_case) , (snake_case) , (snake_case)) = SARIngram(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
((snake_case) , (snake_case) , (snake_case)) = SARIngram(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
((snake_case) , (snake_case) , (snake_case)) = SARIngram(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
snake_case = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
snake_case = sum([delascore, delascore, delascore, delascore] ) / 4
snake_case = sum([addascore, addascore, addascore, addascore] ) / 4
snake_case = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
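# The SARI value returned above is the mean of three operation scores -- keep,
# deletion and addition -- each of which is itself averaged over the four n-gram
# orders (1- to 4-grams) built in the loops earlier in this function.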
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ = True ,UpperCamelCase_ = "13a" ,UpperCamelCase_ = True ):
"""simple docstring"""
if lowercase:
snake_case = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
snake_case = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase_ )()(UpperCamelCase_ )
else:
snake_case = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase_ )
elif tokenizer == "moses":
snake_case = sacremoses.MosesTokenizer().tokenize(UpperCamelCase_ ,return_str=UpperCamelCase_ ,escape=UpperCamelCase_ )
elif tokenizer == "penn":
snake_case = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase_ ,return_str=UpperCamelCase_ )
else:
snake_case = sentence
if not return_str:
snake_case = normalized_sent.split()
return normalized_sent
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
if not (len(UpperCamelCase_ ) == len(UpperCamelCase_ ) == len(UpperCamelCase_ )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
snake_case = 0
for src, pred, refs in zip(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
sari_score += SARIsent(normalize(UpperCamelCase_ ) ,normalize(UpperCamelCase_ ) ,[normalize(UpperCamelCase_ ) for sent in refs] )
snake_case = sari_score / len(UpperCamelCase_ )
return 1_00 * sari_score
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_="exp" ,UpperCamelCase_=None ,UpperCamelCase_=False ,UpperCamelCase_=False ,UpperCamelCase_=False ,):
"""simple docstring"""
snake_case = len(references[0] )
if any(len(UpperCamelCase_ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
snake_case = [[refs[i] for refs in references] for i in range(UpperCamelCase_ )]
snake_case = sacrebleu.corpus_bleu(
UpperCamelCase_ ,UpperCamelCase_ ,smooth_method=UpperCamelCase_ ,smooth_value=UpperCamelCase_ ,force=UpperCamelCase_ ,lowercase=UpperCamelCase_ ,use_effective_order=UpperCamelCase_ ,)
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
"""simple docstring"""
def a_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = {}
result.update({'''sari''': compute_sari(sources=__snake_case , predictions=__snake_case , references=__snake_case )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=__snake_case , references=__snake_case )} )
result.update({'''exact''': compute_em(predictions=__snake_case , references=__snake_case )} )
return result
| 366
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_SCREAMING_SNAKE_CASE : Union[str, Any] = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCamelCase_ )
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
snake_case = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(UpperCamelCase_ ,id=UpperCamelCase_ )
| 213
| 0
|
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class _snake_case ( A__ ):
_lowercase : BigBirdConfig
_lowercase : jnp.dtype = jnp.floataa
_lowercase : bool = True
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
super().setup()
SCREAMING_SNAKE_CASE = nn.Dense(5 , dtype=self.dtype)
def __call__( self , *a , **a) -> List[Any]:
SCREAMING_SNAKE_CASE = super().__call__(*a , **a)
SCREAMING_SNAKE_CASE = self.cls(outputs[2])
return outputs[:2] + (cls_out,)
class _snake_case ( A__ ):
_lowercase : Optional[Any] = FlaxBigBirdForNaturalQuestionsModule
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
def cross_entropy(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None):
SCREAMING_SNAKE_CASE = logits.shape[-1]
SCREAMING_SNAKE_CASE = (labels[..., None] == jnp.arange(_UpperCAmelCase)[None]).astype('f4')
SCREAMING_SNAKE_CASE = jax.nn.log_softmax(_UpperCAmelCase , axis=-1)
SCREAMING_SNAKE_CASE = -jnp.sum(labels * logits , axis=-1)
if reduction is not None:
SCREAMING_SNAKE_CASE = reduction(_UpperCAmelCase)
return loss
SCREAMING_SNAKE_CASE = partial(_UpperCAmelCase , reduction=jnp.mean)
SCREAMING_SNAKE_CASE = cross_entropy(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = cross_entropy(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = cross_entropy(_UpperCAmelCase , _UpperCAmelCase)
return (start_loss + end_loss + pooled_loss) / 3
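# Note: the QA loss above is the mean of three cross-entropies -- start-token
# logits, end-token logits, and the 5-way pooled (answer-category) logits coming
# from the extra Dense(5) head defined in the module class above.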
@dataclass
class _snake_case :
_lowercase : str = "google/bigbird-roberta-base"
_lowercase : int = 30_00
_lowercase : int = 1_05_00
_lowercase : int = 1_28
_lowercase : int = 3
_lowercase : int = 1
_lowercase : int = 5
# tx_args
_lowercase : float = 3e-5
_lowercase : float = 0.0
_lowercase : int = 2_00_00
_lowercase : float = 0.0095
_lowercase : str = "bigbird-roberta-natural-questions"
_lowercase : str = "training-expt"
_lowercase : str = "data/nq-training.jsonl"
_lowercase : str = "data/nq-validation.jsonl"
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
os.makedirs(self.base_dir , exist_ok=a)
SCREAMING_SNAKE_CASE = os.path.join(self.base_dir , self.save_dir)
SCREAMING_SNAKE_CASE = self.batch_size_per_device * jax.device_count()
@dataclass
class _snake_case :
_lowercase : int
_lowercase : int = 40_96 # no dynamic padding on TPUs
def __call__( self , a) -> Tuple:
SCREAMING_SNAKE_CASE = self.collate_fn(a)
SCREAMING_SNAKE_CASE = jax.tree_util.tree_map(a , a)
return batch
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.fetch_inputs(features['input_ids'])
SCREAMING_SNAKE_CASE = {
'input_ids': jnp.array(a , dtype=jnp.intaa),
'attention_mask': jnp.array(a , dtype=jnp.intaa),
'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa),
'end_labels': jnp.array(features['end_token'] , dtype=jnp.intaa),
'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa),
}
return batch
def SCREAMING_SNAKE_CASE__ ( self , a) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = [self._fetch_inputs(a) for ids in input_ids]
return zip(*a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Tuple:
SCREAMING_SNAKE_CASE = [1 for _ in range(len(a))]
while len(a) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None):
if seed is not None:
SCREAMING_SNAKE_CASE = dataset.shuffle(seed=_UpperCAmelCase)
for i in range(len(_UpperCAmelCase) // batch_size):
SCREAMING_SNAKE_CASE = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(_UpperCAmelCase)
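# Note: the integer division above means a trailing partial batch (fewer than
# batch_size examples) is silently dropped.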
@partial(jax.pmap , axis_name='batch')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase):
def loss_fn(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = model_inputs.pop('start_labels')
SCREAMING_SNAKE_CASE = model_inputs.pop('end_labels')
SCREAMING_SNAKE_CASE = model_inputs.pop('pooled_labels')
SCREAMING_SNAKE_CASE = state.apply_fn(**_UpperCAmelCase , params=_UpperCAmelCase , dropout_rng=_UpperCAmelCase , train=_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs
return state.loss_fn(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = jax.random.split(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = jax.value_and_grad(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = grad_fn(state.params)
SCREAMING_SNAKE_CASE = jax.lax.pmean({'loss': loss} , axis_name='batch')
SCREAMING_SNAKE_CASE = jax.lax.pmean(_UpperCAmelCase , 'batch')
SCREAMING_SNAKE_CASE = state.apply_gradients(grads=_UpperCAmelCase)
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='batch')
def lowerCamelCase__ (_UpperCAmelCase , **_UpperCAmelCase):
SCREAMING_SNAKE_CASE = model_inputs.pop('start_labels')
SCREAMING_SNAKE_CASE = model_inputs.pop('end_labels')
SCREAMING_SNAKE_CASE = model_inputs.pop('pooled_labels')
SCREAMING_SNAKE_CASE = state.apply_fn(**_UpperCAmelCase , params=state.params , train=_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs
SCREAMING_SNAKE_CASE = state.loss_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = jax.lax.pmean({'loss': loss} , axis_name='batch')
return metrics
class _snake_case ( train_state.TrainState ):
_lowercase : Callable = struct.field(pytree_node=A__ )
@dataclass
class _snake_case :
_lowercase : Args
_lowercase : Callable
_lowercase : Callable
_lowercase : Callable
_lowercase : Callable
_lowercase : wandb
_lowercase : Callable = None
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a=None) -> Dict:
SCREAMING_SNAKE_CASE = model.params
SCREAMING_SNAKE_CASE = TrainState.create(
apply_fn=model.__call__ , params=a , tx=a , loss_fn=a , )
if ckpt_dir is not None:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = restore_checkpoint(a , a)
SCREAMING_SNAKE_CASE = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = build_tx(**a)
SCREAMING_SNAKE_CASE = train_state.TrainState(
step=a , apply_fn=model.__call__ , params=a , tx=a , opt_state=a , )
SCREAMING_SNAKE_CASE = args
SCREAMING_SNAKE_CASE = data_collator
SCREAMING_SNAKE_CASE = lr
SCREAMING_SNAKE_CASE = params
SCREAMING_SNAKE_CASE = jax_utils.replicate(a)
return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # Apply weight decay to everything except biases and LayerNorm scale params;
        # the flattened keys are tuples of path components, so we test the path.
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
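# Illustrative wiring of the pieces above (the hyperparameter values here are
# assumptions for the sketch, not values taken from the original script):
# tx, lr = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=1_000, num_train_steps=10_000, weight_decay=1e-2)
# trainer = Trainer(args=args, data_collator=collate_fn, train_step_fn=train_step,
#                   val_step_fn=val_step, model_save_fn=model.save_pretrained,
#                   logger=wandb, scheduler_fn=lr)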
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
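# Minimal usage sketch (the `waveform` array is an assumption for illustration,
# not part of this file):
# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")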
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
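# For example, under Python >= 3.10 the annotation `foo: int | None = None` is
# equivalent to `foo: Optional[int] = None`; the dataclasses below exercise both
# spellings so the parser is tested against each.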
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of two `argparse.ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # `parse_yaml_file` also handles JSON, since YAML is a superset of JSON
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
from collections import deque
def tarjan(g):
    """
    Tarjan's algorithm for finding the strongly connected components of a
    directed graph, given as an adjacency list `g`.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
_UpperCamelCase = 7
_UpperCamelCase = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_UpperCamelCase = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_UpperCamelCase = [(u, v) for u, v in zip(source, target)]
_UpperCamelCase = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
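# Note: Tarjan's algorithm emits each SCC only after every SCC reachable from it,
# so the components above come out in reverse topological order of the condensation.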
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[f"upernet-convnext-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
def or_gate(input_1: int, input_2: int) -> int:
    """
    Calculate the OR of two binary inputs: the tuple-count trick returns 1
    if at least one of the inputs is 1, and 0 otherwise.
    """
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """
    Tests the or_gate function
    """
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel

do_only_config = False
do_only_weights = True
do_only_renaming = False

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
def abbr(a: str, b: str) -> bool:
    """
    Check whether string ``a`` can be abbreviated to string ``b`` by capitalizing
    some of its lowercase letters and deleting all remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __lowerCamelCase ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = torch_device == 'cpu'
__lowerCAmelCase : str = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.get_dummy_components()
__lowerCAmelCase : List[Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = 1
__lowerCAmelCase : List[Any] = 2
__lowerCAmelCase : int = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase : Dict = batch_size * [inputs[key]]
__lowerCAmelCase : Any = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
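# Illustrative sketch (not part of the test suite): `assert_mean_pixel_difference` is
# imported above from the shared pipeline-test helpers. A minimal implementation along
# these lines -- the exact tolerance used by diffusers may differ -- would be:
#
#     def assert_mean_pixel_difference(image, expected_image, expected_max_diff=10):
#         image = np.asarray(image, dtype=np.float32)
#         expected_image = np.asarray(expected_image, dtype=np.float32)
#         avg_diff = np.abs(image - expected_image).mean()
#         assert avg_diff < expected_max_diff, f"Error image deviates {avg_diff} pixels on average"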
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement(A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, µA(x) - µB(x)]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
    # max-min composition
    # max-product composition
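    # Illustrative sketch for the two composition comments above (the relations R and
    # S are assumptions for the example, not from the original script). A max-min
    # composition T = R o S takes T[i, j] = max over k of min(R[i, k], S[k, j]),
    # which plain numpy broadcasting expresses directly:
    R = np.array([[0.6, 0.3], [0.2, 0.9]])
    S = np.array([[1.0, 0.5, 0.3], [0.8, 0.4, 0.7]])
    max_min_composition = np.max(
        np.minimum(R[:, :, np.newaxis], S[np.newaxis, :, :]), axis=1
    )
    # max-product composition replaces min with the ordinary product:
    max_product_composition = np.max(
        R[:, :, np.newaxis] * S[np.newaxis, :, :], axis=1
    )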
    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
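# Usage sketch (illustrative, not part of the original module): splitting a README
# that carries a YAML front-matter block.
#
#     >>> _split_yaml_from_readme("---\nlicense: mit\n---\n# My dataset")
#     ('license: mit', '# My dataset')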
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load the dataset metadata from the YAML block of a dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
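# Usage sketch (illustrative): round-tripping metadata through YAML. The dashed
# YAML key is mapped to a Python-friendly field name on load and mapped back on dump.
#
#     >>> meta = DatasetMetadata.from_yaml_string("license: mit\ntrain-eval-index: []")
#     >>> sorted(meta)
#     ['license', 'train_eval_index']
#     >>> print(meta.to_yaml_string(), end="")
#     license: mit
#     train-eval-index: []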
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
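# Illustrative usage of the two IAM helpers above (sketch only; requires valid AWS
# credentials and is therefore not executed here):
#
#     _create_iam_role_for_sagemaker("accelerate_sagemaker_execution_role")
#     role_arn = _get_iam_role_arn("accelerate_sagemaker_execution_role")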
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
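# Illustrative sketch (assumption, not part of the original module): the fairseq
# offset bookkeeping above can be checked with a toy alignment. SentencePiece puts
# '<unk>' at id 0, while fairseq reserves ids 0-3 for '<s>', '<pad>', '</s>', '<unk>',
# so every "real" spm piece is shifted up by `fairseq_offset = 1`:
#
#     spm_vocab = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}
#     fairseq_offset = 1
#     fairseq_id = {piece: spm_id + fairseq_offset for piece, spm_id in spm_vocab.items()}
#     assert fairseq_id[","] == 4  # ',' is the first "real" token in the fairseq vocab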
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    memory_trace = start_memory_tracing("transformers")
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(memory_trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
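# Illustrative sketch (not part of the benchmark class): the `_measure_speed`
# pattern above -- `min(timeit.repeat(...)) / number` -- in isolation:
#
#     import timeit
#     runtimes = timeit.repeat(lambda: sum(range(1000)), repeat=3, number=10)
#     best_time_per_call = min(runtimes) / 10  # min is more stable than the mean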
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from the top-left to the bottom-right corner of
    `grid`, where cells containing 1 are blocked."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
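# Illustrative usage (the grid and expected count are assumptions for the example,
# not from the original file): a fully open 2x2 grid has exactly two paths from the
# top-left to the bottom-right corner.
#
#     >>> depth_first_search([[0, 0], [0, 0]], 0, 0, set())
#     2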
"""simple docstring"""
def net_present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
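# Worked example (illustrative values, not from the original file): an initial
# outlay of 100 followed by two inflows of 60, discounted at 10%, gives
# -100/1.0 + 60/1.1 + 60/1.21 = 4.13.
#
#     >>> net_present_value(0.10, [-100, 60, 60])
#     4.13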
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
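# Illustrative check (assumption, not in the original file): the generated keys
# satisfy e_2 * (e_1 ** d) == 1 (mod p), since e_2 is constructed as the modular
# inverse of e_1^d; this identity is what makes ElGamal decryption work.
#
#     public_key, private_key = generate_key(key_size)
#     _, e_1, e_2, p = public_key
#     _, d = private_key
#     assert (e_2 * pow(e_1, d, p)) % p == 1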
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the value d <= `digit` for which numerator/d has the longest
    recurring decimal cycle (Project Euler problem 26)."""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
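# Illustrative usage (the values come from the problem statement, not the original
# file): among denominators up to 10, 1/7 has the longest recurring cycle (6 digits).
#
#     >>> solution(1, 10)
#     7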
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
'''simple docstring'''
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
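# Illustrative usage sketch (not part of the original test file): how the
# processor under test is typically driven outside of unittest, assuming the
# BeitImageProcessor API exercised above:
#     image_processing = BeitImageProcessor(do_reduce_labels=True)
#     image, map = prepare_semantic_single_inputs()
#     encoding = image_processing(image, map, return_tensors="pt")
#     # encoding["pixel_values"]: (1, num_channels, crop_h, crop_w); encoding["labels"]: (1, crop_h, crop_w)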
| 209
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(self, feature_size=80, sampling_rate=16000, padding_value=0.0, hop_length=10, win_length=25, win_function="hamming_window", frame_signal_scale=32768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel="log", )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, **kwargs) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
return padded_inputs
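# Illustrative usage sketch (not part of the original module), assuming the
# class name restored above and the __call__ API it defines:
#     extractor = MCTCTFeatureExtractor()
#     waveforms = [np.zeros(16000, dtype=np.float32), np.zeros(8000, dtype=np.float32)]
#     inputs = extractor(waveforms, sampling_rate=16000, padding=True, return_tensors="np")
#     # inputs["input_features"] has shape (batch, frames, feature_size)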
| 182
| 0
|
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    '''simple docstring'''
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
'''simple docstring'''
for row in grid:
for cell in row:
            print(cell, end=" ")
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
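# Note (illustrative, not part of the original script): ``sudoku`` fills the
# grid in place, so to actually compare against the unmodified grid (as the
# comment above intends) one could solve a deep copy instead:
#     import copy
#     solution = sudoku(copy.deepcopy(example_grid))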
| 370
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case_ : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case_ : Any = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 
5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,_UpperCamelCase )
self.assertListEqual(encoding.boxes ,_UpperCamelCase )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
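# Illustrative usage sketch (not part of the original test file), assuming the
# LayoutLMv3ImageProcessor API exercised above: with apply_ocr=True the
# processor returns OCR words and normalized boxes alongside pixel values.
#     processor = LayoutLMv3ImageProcessor()  # requires pytesseract
#     encoding = processor(image, return_tensors="pt")
#     words, boxes = encoding.words, encoding.boxes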
| 8
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50400, n_positions=2048, n_ctx=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
@property
    def num_layers(self) -> int:
        return self._config.n_layer
@property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
@property
    def default_onnx_opset(self) -> int:
        return 13
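# Illustrative usage sketch (not part of the original module): wiring the two
# classes above together; `tokenizer` is assumed to be any pretrained
# CodeGen-compatible tokenizer.
#     config = CodeGenConfig(n_layer=2, n_head=4, n_embd=256)
#     onnx_config = CodeGenOnnxConfig(config, task="default", use_past=True)
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)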
| 258
|
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
        EXPECTED_BLIP_MAPPING = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
        EXPECTED_BLIP_MAPPING = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
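# Illustrative usage sketch (not part of the original test file): the mappings
# exercised above can also be inspected directly, e.g.
#     mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
#     print(get_test_info.to_json(mapping))  # {"BertModelTest": "BertModelTester"}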
| 258
| 1
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(image):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
{
                "image": load_image(image),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
        outputs = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
        outputs = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
        outputs = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
# This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
        outputs = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
# This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
    def test_small_model_tf(self):
        pass
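# Illustrative usage sketch (not part of the original test file), assuming the
# same pipeline API exercised above:
#     dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#     answers = dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2)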
| 334
|
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
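# Illustrative note (not part of the original module): the guards above keep
# the package importable without torch/flax installed; with torch available,
# e.g.:
#     from diffusers.models import UNet2DModel
#     unet = UNet2DModel()  # instantiates with default config kwargs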
| 334
| 1
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( discount_rate , cash_flows ):
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""" )
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""" )
    # Discount each cash flow at period i back to the present by (1 + rate) ** i.
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
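# Illustrative check (hypothetical figures): at a 13% discount rate, the cash
# flows [10, 20.70, -293, 297] have a net present value of roughly 4.69.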
if __name__ == "__main__":
import doctest
doctest.testmod()
| 341
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase = logging.get_logger(__name__)
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCAmelCase_ = ["pixel_values"]
def __init__(self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = PIL.Image.BICUBIC , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ) -> None:
super().__init__(**UpperCAmelCase )
_snake_case = size if size is not None else {"""height""": 256, """width""": 256}
_snake_case = get_size_dict(UpperCAmelCase )
_snake_case = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_snake_case = get_size_dict(UpperCAmelCase , param_name="""crop_size""" )
_snake_case = do_resize
_snake_case = size
_snake_case = resample
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = do_rescale
_snake_case = rescale_factor
_snake_case = do_normalize
_snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PIL.Image.BICUBIC , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
_snake_case = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
UpperCAmelCase , size=(size["""height"""], size["""width"""]) , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
_snake_case = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCAmelCase , **UpperCAmelCase )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> List[Any]:
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ) -> PIL.Image.Image:
_snake_case = do_resize if do_resize is not None else self.do_resize
_snake_case = resample if resample is not None else self.resample
_snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case = do_rescale if do_rescale is not None else self.do_rescale
_snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case = do_normalize if do_normalize is not None else self.do_normalize
_snake_case = image_mean if image_mean is not None else self.image_mean
_snake_case = image_std if image_std is not None else self.image_std
_snake_case = size if size is not None else self.size
_snake_case = get_size_dict(UpperCAmelCase )
_snake_case = crop_size if crop_size is not None else self.crop_size
_snake_case = get_size_dict(UpperCAmelCase , param_name="""crop_size""" )
_snake_case = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_snake_case = [to_numpy_array(UpperCAmelCase ) for image in images]
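        # Transformations are applied in a fixed order: resize -> center crop -> rescale -> normalize.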
if do_resize:
_snake_case = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_center_crop:
_snake_case = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images]
if do_rescale:
_snake_case = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
_snake_case = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
_snake_case = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
_snake_case = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
| 341
| 1
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def __a ( coefficient_matrix: NDArray[floataa] , constant_matrix: NDArray[floataa] , init_val: list[float] , iterations: int , ) ->list[float]:
    rowsa, colsa = coefficient_matrix.shape
    rowsb, colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
        raise ValueError(msg )
    if colsb != 1:
        msg = F'Constant matrix must be nx1 but received {rowsb}x{colsb}'
        raise ValueError(msg )
    if rowsa != rowsb:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            F'received {rowsa}x{colsa} and {rowsb}x{colsb}'
        )
        raise ValueError(msg )
    if len(init_val ) != rowsa:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            F'matrix but received {len(init_val )} and {rowsa}'
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1' )
    table: NDArray[floataa] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            # Jacobi update: x_i = (b_i - sum of a_ij * x_j_old for j != i) / a_ii
            new_val.append((temp + val) / denom )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant ( table: NDArray[floataa] ) ->bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    # Each diagonal entry must exceed the sum of the other entries in its row.
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
    return is_diagonally_dominant
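# Illustrative usage (hypothetical numbers): for the diagonally dominant system
#     A = np.array([[4.0, 1.0], [1.0, 3.0]]), b = np.array([[1.0], [2.0]])
# repeated iterations move init_val toward the exact solution of Ax = b.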
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
def __init__( self , lowercase , lowercase=3 , lowercase=32 , lowercase=3 , lowercase=10 , lowercase=[10, 20, 30, 40] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ) -> Any:
'''simple docstring'''
a__: Union[str, Any] = parent
a__: int = batch_size
a__: List[Any] = image_size
a__: Any = num_channels
a__: Dict = embeddings_size
a__: str = hidden_sizes
a__: List[Any] = depths
a__: Optional[int] = is_training
a__: Optional[int] = use_labels
a__: Tuple = hidden_act
a__: Any = num_labels
a__: Union[str, Any] = scope
a__: Tuple = len(lowercase)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__: str = None
if self.use_labels:
a__: Dict = ids_tensor([self.batch_size] , self.num_labels)
a__: int = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Any:
'''simple docstring'''
a__: Dict = RegNetModel(config=lowercase)
model.to(lowercase)
model.eval()
a__: List[str] = model(lowercase)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
a__: Tuple = self.num_labels
a__: Tuple = RegNetForImageClassification(lowercase)
model.to(lowercase)
model.eval()
a__: Any = model(lowercase , labels=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: int = self.prepare_config_and_inputs()
a__ , a__ , a__: List[Any] = config_and_inputs
a__: Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
a__ = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
a__ = (
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Dict = RegNetModelTester(self)
a__: Dict = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return
@unittest.skip(reason='RegNet does not use inputs_embeds')
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='RegNet does not support input and output embeddings')
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__ , a__: str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__: int = model_class(lowercase)
a__: str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__: int = [*signature.parameters.keys()]
a__: List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__ , a__: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__: int = model_class(config=lowercase)
for name, module in model.named_modules():
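                # Freshly initialized normalization layers should act as identity:
                # weights of 1 and biases of 0.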
if isinstance(lowercase , (nn.BatchNormad, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
def check_hidden_states_output(lowercase , lowercase , lowercase):
a__: int = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__: int = model(**self._prepare_for_class(lowercase , lowercase))
a__: Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a__: List[str] = self.model_tester.num_stages
self.assertEqual(len(lowercase) , expected_num_stages + 1)
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
a__ , a__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a__: Dict = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
a__: Dict = layer_type
a__: Dict = True
check_hidden_states_output(lowercase , lowercase , lowercase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__: str = True
check_hidden_states_output(lowercase , lowercase , lowercase)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase)
@slow
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: Tuple = RegNetModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
def __a ( ) ->Dict:
a__: str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Tuple = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(lowercase)
a__: Tuple = self.default_image_processor
a__: str = prepare_img()
a__: Optional[int] = image_processor(images=lowercase , return_tensors='pt').to(lowercase)
# forward pass
with torch.no_grad():
a__: List[Any] = model(**lowercase)
# verify the logits
a__: Tuple = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , lowercase)
a__: Optional[Any] = torch.tensor([-0.4180, -1.5051, -3.4836]).to(lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4))
| 203
| 0
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _snake_case ( unittest.TestCase , __snake_case ):
'''simple docstring'''
def A__ ( self: Any ) -> Tuple:
UpperCAmelCase_ : int = load_tool("""text-classification""" )
self.tool.setup()
UpperCAmelCase_ : List[str] = load_tool("""text-classification""" ,remote=lowerCamelCase_ )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.tool("""That's quite cool""" ,["""positive""", """negative"""] )
self.assertEqual(lowerCamelCase_ ,"""positive""" )
def A__ ( self: str ) -> List[str]:
UpperCAmelCase_ : List[str] = self.remote_tool("""That's quite cool""" ,["""positive""", """negative"""] )
self.assertEqual(lowerCamelCase_ ,"""positive""" )
def A__ ( self: Optional[Any] ) -> int:
UpperCAmelCase_ : Dict = self.tool(text="""That's quite cool""" ,labels=["""positive""", """negative"""] )
self.assertEqual(lowerCamelCase_ ,"""positive""" )
def A__ ( self: Dict ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = self.remote_tool(text="""That's quite cool""" ,labels=["""positive""", """negative"""] )
self.assertEqual(lowerCamelCase_ ,"""positive""" )
| 345
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: Optional[int]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Optional[Any]=[32, 64, 128] ,lowerCamelCase_: Optional[int]=[1, 2, 1] ,lowerCamelCase_: Union[str, Any]=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[str]=2.0 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: int=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: str=True ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: str=8 ,lowerCamelCase_: Union[str, Any]=["stage1", "stage2"] ,lowerCamelCase_: Optional[Any]=[1, 2] ,) -> str:
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : Dict = embed_dim
UpperCAmelCase_ : Dict = hidden_sizes
UpperCAmelCase_ : str = depths
UpperCAmelCase_ : int = num_heads
UpperCAmelCase_ : List[Any] = window_size
UpperCAmelCase_ : Union[str, Any] = mlp_ratio
UpperCAmelCase_ : int = qkv_bias
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = drop_path_rate
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : List[Any] = use_absolute_embeddings
UpperCAmelCase_ : List[Any] = patch_norm
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Optional[Any] = scope
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = encoder_stride
UpperCAmelCase_ : Optional[int] = out_features
UpperCAmelCase_ : Optional[int] = out_indices
def A__ ( self: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Any = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Tuple:
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ )
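        # Each stage halves the spatial resolution, so the patch count shrinks
        # by a factor of 4 per stage after the first.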
UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]:
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int:
UpperCAmelCase_ : List[Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
A__ : Union[str, Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
A__ : Optional[Any] = False
A__ : Any = False
A__ : List[str] = False
A__ : Any = False
A__ : Any = False
def A__ ( self: List[str] ) -> Tuple:
UpperCAmelCase_ : Dict = FocalNetModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ )
def A__ ( self: List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: List[str] ) -> Union[str, Any]:
return
def A__ ( self: str ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: Tuple ) -> int:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase_ )
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: int ) -> int:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def A__ ( self: Optional[Any] ) -> Optional[Any]:
pass
def A__ ( self: Optional[Any] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Any = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]:
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.hidden_states
UpperCAmelCase_ : List[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# FocalNet has a different seq_length
UpperCAmelCase_ : int = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape
UpperCAmelCase_ : List[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
@slow
def A__ ( self: Optional[int] ) -> Optional[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Optional[int] ) -> str:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
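        # Class index 281 is expected to correspond to the "tabby cat" label in ImageNet-1k.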
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() ,281 )
@require_torch
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else ()
A__ : int = FocalNetConfig
A__ : List[str] = False
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : str = FocalNetModelTester(self )
| 345
| 1
|
"""simple docstring"""
from __future__ import annotations
def slowsort ( sequence : list , start : int | None = None , end : int | None = None ) ->None:
    '''simple docstring'''
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
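# Slowsort is deliberately inefficient ("multiply and surrender"): it sorts both
# halves, bubbles the maximum to the end, then re-sorts the remaining prefix.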
if __name__ == "__main__":
from doctest import testmod
testmod()
| 79
|
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
a : List[str] = logging.get_logger(__name__)
a : Optional[int] = '''T5Config'''
def _SCREAMING_SNAKE_CASE ( input_ids : jnp.array , pad_token_id : int , decoder_start_token_id : int ) ->jnp.ndarray:
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
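# Seq2seq models build decoder_input_ids this way: the labels are shifted one
# step right, the decoder start token is prepended, and ignored (-100) positions
# are replaced with the pad token id.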
class __UpperCamelCase ( a__ ):
lowerCamelCase : Any ="""mt5"""
lowerCamelCase : Dict =MTaConfig
class __UpperCamelCase ( a__ ):
lowerCamelCase : str ="""mt5"""
lowerCamelCase : Tuple =MTaConfig
class __UpperCamelCase ( a__ ):
lowerCamelCase : List[str] ="""mt5"""
lowerCamelCase : Tuple =MTaConfig
| 79
| 1
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=30, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=0.6, SCREAMING_SNAKE_CASE_=None, ) -> Any:
UpperCamelCase : Tuple = parent
UpperCamelCase : Optional[Any] = batch_size
UpperCamelCase : Union[str, Any] = image_size
UpperCamelCase : str = patch_size
UpperCamelCase : Optional[Any] = num_channels
UpperCamelCase : Any = is_training
UpperCamelCase : Dict = use_labels
UpperCamelCase : int = hidden_size
UpperCamelCase : Dict = num_hidden_layers
UpperCamelCase : int = num_attention_heads
UpperCamelCase : Any = intermediate_size
UpperCamelCase : List[Any] = hidden_act
UpperCamelCase : str = hidden_dropout_prob
UpperCamelCase : int = attention_probs_dropout_prob
UpperCamelCase : List[Any] = type_sequence_label_size
UpperCamelCase : Optional[int] = initializer_range
UpperCamelCase : Any = mask_ratio
UpperCamelCase : Optional[int] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase : int = (image_size // patch_size) ** 2
UpperCamelCase : Optional[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : Union[str, Any] = None
if self.use_labels:
UpperCamelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self ) -> str:
return ViTMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=_UpperCamelCase, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : List[Any] = TFViTMAEModel(config=_UpperCamelCase )
UpperCamelCase : List[Any] = model(_UpperCamelCase, training=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : Optional[int] = TFViTMAEForPreTraining(_UpperCamelCase )
UpperCamelCase : Tuple = model(_UpperCamelCase, training=_UpperCamelCase )
# expected sequence length = num_patches
UpperCamelCase : Any = (self.image_size // self.patch_size) ** 2
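        # The decoder reconstructs each patch as patch_size**2 pixel values per channel.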
UpperCamelCase : int = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase : Tuple = 1
UpperCamelCase : str = TFViTMAEForPreTraining(_UpperCamelCase )
UpperCamelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : Optional[int] = model(_UpperCamelCase, training=_UpperCamelCase )
UpperCamelCase : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
def snake_case_ ( self ) -> Dict:
UpperCamelCase : List[Any] = self.prepare_config_and_inputs()
((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : int = config_and_inputs
UpperCamelCase : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __A , __A , unittest.TestCase ):
UpperCAmelCase__ : str = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCAmelCase__ : Tuple = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Union[str, Any] = False
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Dict = TFViTMAEModelTester(self )
UpperCamelCase : str = ConfigTester(self, config_class=_UpperCamelCase, has_text_modality=_UpperCamelCase, hidden_size=37 )
def snake_case_ ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def snake_case_ ( self ) -> int:
pass
def snake_case_ ( self ) -> int:
UpperCamelCase , UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[str] = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
UpperCamelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase, tf.keras.layers.Layer ) )
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Any = model_class(_UpperCamelCase )
UpperCamelCase : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : str = [*signature.parameters.keys()]
UpperCamelCase : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1], _UpperCamelCase )
def snake_case_ ( self ) -> int:
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case_ ( self ) -> Dict:
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCamelCase )
def snake_case_ ( self ) -> Optional[Any]:
# make the mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Tuple = int((config.image_size // config.patch_size) ** 2 )
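        # One noise value per patch; ViTMAE sorts this noise to decide which patches get masked.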
UpperCamelCase : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase : str = model_class(_UpperCamelCase )
UpperCamelCase : Optional[Any] = self._prepare_for_class(_UpperCamelCase, _UpperCamelCase )
UpperCamelCase : List[Any] = model(_UpperCamelCase, noise=_UpperCamelCase )
UpperCamelCase : Any = copy.deepcopy(self._prepare_for_class(_UpperCamelCase, _UpperCamelCase ) )
UpperCamelCase : int = model(**_UpperCamelCase, noise=_UpperCamelCase )
UpperCamelCase : Union[str, Any] = outputs_dict[0].numpy()
UpperCamelCase : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1e-6 )
def snake_case_ ( self ) -> Optional[Any]:
# make the mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : int = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_UpperCamelCase ):
UpperCamelCase : Tuple = v.numpy()
else:
UpperCamelCase : List[str] = np.array(_UpperCamelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCamelCase : Optional[int] = model_class(_UpperCamelCase )
UpperCamelCase : List[str] = self._prepare_for_class(_UpperCamelCase, _UpperCamelCase )
UpperCamelCase : List[Any] = prepare_numpy_arrays(_UpperCamelCase )
UpperCamelCase : Any = model(_UpperCamelCase, noise=_UpperCamelCase )
UpperCamelCase : Dict = model(**_UpperCamelCase, noise=_UpperCamelCase )
self.assert_outputs_same(_UpperCamelCase, _UpperCamelCase )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCamelCase : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCamelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase : Union[str, Any] = tf.constant(_UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase : Optional[int] = tf_noise
super().check_pt_tf_models(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
def snake_case_ ( self ) -> str:
# make mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Tuple = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_UpperCamelCase )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(_UpperCamelCase, _UpperCamelCase ),)
if isinstance(_UpperCamelCase, _UpperCamelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_UpperCamelCase, '_keras_serializable', _UpperCamelCase )
}
UpperCamelCase : Tuple = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase : Dict = tf.convert_to_tensor(_UpperCamelCase )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
UpperCamelCase : int = main_layer_class(_UpperCamelCase )
UpperCamelCase : str = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCamelCase : Any = tf.keras.Model(_UpperCamelCase, outputs=main_layer(_UpperCamelCase ) )
UpperCamelCase : int = model(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase : Optional[int] = os.path.join(_UpperCamelCase, 'keras_model.h5' )
model.save(_UpperCamelCase )
UpperCamelCase : Tuple = tf.keras.models.load_model(
_UpperCamelCase, custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_UpperCamelCase, tf.keras.Model )
UpperCamelCase : Optional[Any] = model(_UpperCamelCase )
self.assert_outputs_same(_UpperCamelCase, _UpperCamelCase )
@slow
def snake_case_ ( self ) -> str:
# make mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Dict = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase : List[str] = model_class(_UpperCamelCase )
UpperCamelCase : Optional[int] = self._prepare_for_class(_UpperCamelCase, _UpperCamelCase )
UpperCamelCase : List[str] = model(_UpperCamelCase, noise=_UpperCamelCase )
if model_class.__name__ == "TFViTMAEModel":
UpperCamelCase : Optional[Any] = outputs.last_hidden_state.numpy()
UpperCamelCase : int = 0
else:
UpperCamelCase : List[Any] = outputs.logits.numpy()
UpperCamelCase : Optional[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCamelCase, saved_model=_UpperCamelCase )
UpperCamelCase : List[str] = model_class.from_pretrained(_UpperCamelCase )
UpperCamelCase : Optional[Any] = model(_UpperCamelCase, noise=_UpperCamelCase )
if model_class.__name__ == "TFViTMAEModel":
UpperCamelCase : int = after_outputs['last_hidden_state'].numpy()
UpperCamelCase : Tuple = 0
else:
UpperCamelCase : int = after_outputs['logits'].numpy()
UpperCamelCase : Any = 0
UpperCamelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_UpperCamelCase, 1e-5 )
def snake_case_ ( self ) -> List[str]:
# make mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = model_class(_UpperCamelCase )
UpperCamelCase : str = self._prepare_for_class(_UpperCamelCase, _UpperCamelCase )
UpperCamelCase : Union[str, Any] = model(_UpperCamelCase, noise=_UpperCamelCase )
UpperCamelCase : Optional[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_UpperCamelCase )
UpperCamelCase : Union[str, Any] = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCamelCase : Optional[int] = model_class.from_config(model.config )
UpperCamelCase : Optional[Any] = new_model(_UpperCamelCase ) # Build model
new_model.set_weights(model.get_weights() )
UpperCamelCase : List[str] = new_model(_UpperCamelCase, noise=_UpperCamelCase )
self.assert_outputs_same(_UpperCamelCase, _UpperCamelCase )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def snake_case_ ( self ) -> List[Any]:
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def snake_case_ ( self ) -> Optional[int]:
pass
@slow
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : Any = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(_UpperCamelCase )
def UpperCamelCase ( ) -> List[str]:
UpperCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> Any:
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def snake_case_ ( self ) -> str:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCamelCase : List[Any] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
UpperCamelCase : int = self.default_image_processor
UpperCamelCase : int = prepare_img()
UpperCamelCase : List[Any] = image_processor(images=_UpperCamelCase, return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase : Union[str, Any] = ViTMAEConfig()
UpperCamelCase : Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCamelCase : Union[str, Any] = model(**_UpperCamelCase, noise=_UpperCamelCase )
# verify the logits
UpperCamelCase : Tuple = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape, _UpperCamelCase )
UpperCamelCase : List[Any] = tf.convert_to_tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3], _UpperCamelCase, atol=1e-4 )
| 119
|
from collections import deque
from .hash_table import HashTable
class snake_case_ ( HashTable ):
    '''simple docstring'''

    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )

    def _set_value( self , key , data ):
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]

    def balanced_factor( self ):
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution( self , key , data=None ):
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
| 8
| 0
|
def lowercase( matrix ) -> int:
    '''simple docstring'''
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
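# Illustrative check: [[1, 2], [2, 4]] has rank 1, since the second row is a
# multiple of the first.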
if __name__ == "__main__":
import doctest
doctest.testmod()
| 165
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = StableDiffusionXLImgaImgPipeline
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self : str ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=32 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=True )  # flag value assumed; the original literal was lost in name-mangling
        text_encoder_a = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=True )  # same assumption as above
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def get_dummy_inputs( self : str , device : Any , seed : Optional[int]=0 ):
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.7_5,
}
return inputs
    def test_stable_diffusion_xl_img2img_euler( self : int ):
        """simple docstring"""
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_attention_slicing_forward_pass( self : Union[str, Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
    def test_inference_batch_single_identical( self : Dict ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
pass
    def test_stable_diffusion_xl_img2img_prompt_embeds( self : Tuple ):
        """simple docstring"""
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["""this is a negative prompt"""]
        inputs["""negative_prompt"""] = negative_prompt
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        output = sd_pipe(**inputs )
        image_slice_a = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["""this is a negative prompt"""]
        prompt = 3 * [inputs.pop("""prompt""" )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_b = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    def tearDown( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self : str , device : Optional[Any] , generator_device : Dict="cpu" , dtype : List[str]=torch.floataa , seed : Tuple=0 ):
        """simple docstring"""
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion( self : Tuple ):
        """simple docstring"""
        pipe = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
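# A minimal end-to-end sketch of the img2img call pattern exercised above.
# Hedged: the checkpoint id and image URL are illustrative, and
# `StableDiffusionXLImg2ImgPipeline` is the upstream (un-mangled) name of the
# pipeline class under test.
#
#     import torch
#     from diffusers import StableDiffusionXLImg2ImgPipeline
#     from diffusers.utils import load_image
#
#     pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
#         "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
#     ).to("cuda")
#     init_image = load_image("https://example.com/init.png")  # hypothetical URL
#     result = pipe(
#         "A painting of a squirrel eating a burger",
#         image=init_image, strength=0.75, num_inference_steps=30,
#     ).images[0]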
| 165
| 1
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class a_ :
"""simple docstring"""
@staticmethod
        def _lowerCAmelCase ( *args : Dict ,**kwargs : str ):
pass
    def load_image ( lowerCAmelCase_ ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline ( self : Any ,model : Dict ,tokenizer : List[Any] ,processor : str ):
        dqa_pipeline = pipeline(
            'document-question-answering' ,model=model ,tokenizer=tokenizer ,image_processor=processor )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image ) ,None ,'' ) ) )
        question = 'What is the placebo?'
        examples = [
            {
                'image': load_image(image ),
                'question': question,
            },
            {
                'image': image,
                'question': question,
            },
            {
                'image': image,
                'question': question,
                'word_boxes': word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test ( self : Optional[Any] ,dqa_pipeline : int ,examples : int ):
        outputs = dqa_pipeline(examples ,top_k=2 )
        self.assertEqual(
            outputs ,[
                [
                    {'score': ANY(float ), 'answer': ANY(str ), 'start': ANY(int ), 'end': ANY(int )},
                    {'score': ANY(float ), 'answer': ANY(str ), 'start': ANY(int ), 'end': ANY(int )},
                ]
            ]
            * 3 ,)
@require_torch
@require_detectrona
@require_pytesseract
    def test_small_model_pt ( self : Any ):
SCREAMING_SNAKE_CASE =pipeline('document-question-answering' ,model='hf-internal-testing/tiny-random-layoutlmv2' )
SCREAMING_SNAKE_CASE =INVOICE_URL
SCREAMING_SNAKE_CASE ='How many cats are there?'
SCREAMING_SNAKE_CASE =[
{'score': 0.0_001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.0_001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
SCREAMING_SNAKE_CASE =dqa_pipeline(image=snake_case ,question=snake_case ,top_k=2 )
self.assertEqual(nested_simplify(snake_case ,decimals=4 ) ,snake_case )
SCREAMING_SNAKE_CASE =dqa_pipeline({'image': image, 'question': question} ,top_k=2 )
self.assertEqual(nested_simplify(snake_case ,decimals=4 ) ,snake_case )
        # No text is detected in this image, so layoutlmv2 should fail and
        # return an empty answer.
SCREAMING_SNAKE_CASE ='./tests/fixtures/tests_samples/COCO/000000039769.png'
SCREAMING_SNAKE_CASE =dqa_pipeline(image=snake_case ,question=snake_case ,top_k=2 )
self.assertEqual(snake_case ,[] )
        # We can optionally pass the words and bounding boxes directly
SCREAMING_SNAKE_CASE ='./tests/fixtures/tests_samples/COCO/000000039769.png'
SCREAMING_SNAKE_CASE =[]
SCREAMING_SNAKE_CASE =[]
SCREAMING_SNAKE_CASE =dqa_pipeline(image=snake_case ,question=snake_case ,words=snake_case ,boxes=snake_case ,top_k=2 )
self.assertEqual(snake_case ,[] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
    def test_large_model_pt ( self : Dict ):
SCREAMING_SNAKE_CASE =pipeline(
'document-question-answering' ,model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' ,revision='9977165' ,)
SCREAMING_SNAKE_CASE =INVOICE_URL
SCREAMING_SNAKE_CASE ='What is the invoice number?'
SCREAMING_SNAKE_CASE =dqa_pipeline(image=snake_case ,question=snake_case ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
{'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE =dqa_pipeline({'image': image, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
{'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE =dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
[
{'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 ,)
@slow
@require_torch
@require_detectrona
@require_pytesseract
    def test_large_model_pt_chunk ( self : Tuple ):
SCREAMING_SNAKE_CASE =pipeline(
'document-question-answering' ,model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' ,revision='9977165' ,max_seq_len=50 ,)
SCREAMING_SNAKE_CASE =INVOICE_URL
SCREAMING_SNAKE_CASE ='What is the invoice number?'
SCREAMING_SNAKE_CASE =dqa_pipeline(image=snake_case ,question=snake_case ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
{'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE =dqa_pipeline({'image': image, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
{'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE =dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
[
{'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 ,)
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm ( self : Any ):
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' ,revision='3dc6de3' ,add_prefix_space=snake_case )
SCREAMING_SNAKE_CASE =pipeline(
'document-question-answering' ,model='impira/layoutlm-document-qa' ,tokenizer=snake_case ,revision='3dc6de3' ,)
SCREAMING_SNAKE_CASE =INVOICE_URL
SCREAMING_SNAKE_CASE ='What is the invoice number?'
SCREAMING_SNAKE_CASE =dqa_pipeline(image=snake_case ,question=snake_case ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
] ,)
SCREAMING_SNAKE_CASE =dqa_pipeline({'image': image, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
] ,)
SCREAMING_SNAKE_CASE =dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
[
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 ,)
SCREAMING_SNAKE_CASE =list(zip(*apply_tesseract(load_image(snake_case ) ,snake_case ,'' ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE =dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
] ,)
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk ( self : str ):
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' ,revision='3dc6de3' ,add_prefix_space=snake_case )
SCREAMING_SNAKE_CASE =pipeline(
'document-question-answering' ,model='impira/layoutlm-document-qa' ,tokenizer=snake_case ,revision='3dc6de3' ,max_seq_len=50 ,)
SCREAMING_SNAKE_CASE =INVOICE_URL
SCREAMING_SNAKE_CASE ='What is the invoice number?'
SCREAMING_SNAKE_CASE =dqa_pipeline(image=snake_case ,question=snake_case ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
{'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE =dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
[
{'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 ,)
SCREAMING_SNAKE_CASE =list(zip(*apply_tesseract(load_image(snake_case ) ,snake_case ,'' ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE =dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
{'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
@slow
@require_torch
    def test_large_model_pt_donut ( self : Any ):
SCREAMING_SNAKE_CASE =pipeline(
'document-question-answering' ,model='naver-clova-ix/donut-base-finetuned-docvqa' ,tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) ,feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' ,)
SCREAMING_SNAKE_CASE =INVOICE_URL
SCREAMING_SNAKE_CASE ='What is the invoice number?'
SCREAMING_SNAKE_CASE =dqa_pipeline(image=snake_case ,question=snake_case ,top_k=2 )
self.assertEqual(nested_simplify(snake_case ,decimals=4 ) ,[{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
    def test_small_model_tf ( self : Union[str, Any] ):
pass
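# Quick usage sketch of the pipeline exercised above (hedged: the checkpoint and
# the expected answer mirror the slow tests, they are not guaranteed output):
#
#     from transformers import pipeline
#     dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#     dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2)
#     # -> [{"score": ..., "answer": "us-001", "start": 16, "end": 16}, ...]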
| 334
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class a_ :
"""simple docstring"""
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )
    def _forward_hook ( self : Any ,m : Any ,inputs : Tensor ,outputs : Tensor ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m ,nn.Convad ) or isinstance(m ,nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self : int ,snake_case : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case )
[x.remove() for x in self.handles]
return self
@property
def _lowerCAmelCase ( self : Tuple ):
# check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
@dataclass
class a_ :
"""simple docstring"""
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )
    def __call__( self : int ,x : Tensor ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip ,src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip ,dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced )} operations while'
                f' destination module has {len(dest_traced )}.' )
        for dest_m, src_m in zip(dest_traced ,src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'Transferred from={src_m} to={dest_m}' )
def convert_weight_and_push ( name, config, save_directory, push_to_hub = True ):
    """simple docstring"""
    print(F'Converting {name}...' )
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ), our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F'resnet{"-".join(name.split("resnet" ) )}'
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )
        # we can use the convnext image processor
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )
        print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push ( save_directory, model_name = None, push_to_hub = True ):
    """simple docstring"""
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, idalabel=idalabel, labelaid=labelaid )
    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub )
return config, expected_shape
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowerCamelCase =parser.parse_args()
_lowerCamelCase =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
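# Example invocation (hypothetical script name and output path):
#
#     python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted-resnet
#
# Omitting --model_name converts every supported resnet variant in one run.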
| 334
| 1
|
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class A_ ( PretrainedConfig ):
    '''simple docstring'''

    model_type = """data2vec-audio"""

    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embedding_groups=16 , conv_pos_kernel_size=19 , num_conv_pos_embeddings=5 , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="sum" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1_500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio( self ) -> int:
        return math.prod(self.conv_stride )
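# Quick sanity sketch using the defaults above: seven conv layers with strides
# (5, 2, 2, 2, 2, 2, 2) compress 320 raw audio samples into one output frame.
#
#     config = A_()
#     assert config.inputs_to_logits_ratio == 320  # 5 * 2**6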
| 280
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase__ = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
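# Note: with the _LazyModule indirection above, importing this package is cheap;
# the torch-dependent `modeling_van` module is only loaded on first attribute
# access (e.g. `VanModel`), and is skipped entirely when torch is unavailable.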
| 280
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class lowercase ( PretrainedConfig ):
    """simple docstring"""

    model_type = 'trajectory_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0006 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
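# Instantiation sketch: the defaults above mirror the halfcheetah-medium-v2
# checkpoint referenced in the archive map.
#
#     config = lowercase()
#     (config.n_layer, config.n_head, config.n_embd)  # -> (4, 4, 128)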
| 97
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__snake_case = """hf-internal-testing/tiny-random-bert"""
__snake_case = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
__snake_case = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class _lowerCAmelCase ( unittest.TestCase ):
    def test_cached_file ( self ):
'''simple docstring'''
snake_case : int = cached_file(UpperCamelCase__ , UpperCamelCase__ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCamelCase__ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) )
with open(os.path.join(UpperCamelCase__ , "refs" , "main" ) ) as f:
snake_case : Dict = f.read()
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , "snapshots" , UpperCamelCase__ , UpperCamelCase__ ) )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
# File is cached at the same place the second time.
snake_case : List[str] = cached_file(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# Using a specific revision to test the full commit hash.
snake_case : Any = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision="9b8c223" )
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , "snapshots" , UpperCamelCase__ , UpperCamelCase__ ) )
    def test_cached_file_errors ( self ):
'''simple docstring'''
with self.assertRaisesRegex(UpperCamelCase__ , "is not a valid model identifier" ):
snake_case : Optional[Any] = cached_file("tiny-random-bert" , UpperCamelCase__ )
with self.assertRaisesRegex(UpperCamelCase__ , "is not a valid git identifier" ):
snake_case : Optional[Any] = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision="aaaa" )
with self.assertRaisesRegex(UpperCamelCase__ , "does not appear to have a file named" ):
snake_case : List[Any] = cached_file(UpperCamelCase__ , "conf" )
    def test_non_existence_is_cached ( self ):
'''simple docstring'''
with self.assertRaisesRegex(UpperCamelCase__ , "does not appear to have a file named" ):
snake_case : Tuple = cached_file(UpperCamelCase__ , "conf" )
with open(os.path.join(UpperCamelCase__ , "refs" , "main" ) ) as f:
snake_case : Any = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , ".no_exist" , UpperCamelCase__ , "conf" ) ) )
snake_case : Optional[Any] = cached_file(UpperCamelCase__ , "conf" , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
snake_case : Any = cached_file(UpperCamelCase__ , "conf" , local_files_only=UpperCamelCase__ , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
snake_case : Any = mock.Mock()
snake_case : List[Any] = 500
snake_case : int = {}
snake_case : Optional[int] = HTTPError
snake_case : Tuple = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=UpperCamelCase__ ) as mock_head:
snake_case : Tuple = cached_file(UpperCamelCase__ , "conf" , _raise_exceptions_for_connection_errors=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
# This check we did call the fake head request
mock_head.assert_called()
    def test_has_file ( self ):
'''simple docstring'''
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , WEIGHTS_NAME ) )
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , FLAX_WEIGHTS_NAME ) )
    def test_get_file_from_repo_distant ( self ):
'''simple docstring'''
self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , "is not a valid model identifier" ):
get_file_from_repo("bert-base-case" , UpperCamelCase__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , "is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" , UpperCamelCase__ , revision="ahaha" )
snake_case : int = get_file_from_repo("bert-base-cased" , UpperCamelCase__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case : str = json.loads(open(UpperCamelCase__ , "r" ).read() )
self.assertEqual(config["hidden_size"] , 768 )
    def test_get_file_from_repo_local ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case : int = Path(UpperCamelCase__ ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(UpperCamelCase__ , "a.txt" ) , str(UpperCamelCase__ ) )
self.assertIsNone(get_file_from_repo(UpperCamelCase__ , "b.txt" ) )
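# Usage sketch of the helpers under test (model ids mirror the tests above):
#
#     from transformers.utils import cached_file, get_file_from_repo
#     resolved = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
#     maybe = get_file_from_repo("bert-base-cased", "ahah.txt")  # None when the file is absent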
| 203
| 0
|
def nand_gate ( input_a , input_b ) -> int:
    return int((input_a, input_b).count(0 ) != 0 )


def test_nand_gate ( ) -> None:
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
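# NAND is functionally complete, so the other gates can be built from it; a
# small sketch (hypothetical helper names):
#
#     def not_gate(a):
#         return nand_gate(a, a)
#
#     def and_gate(a, b):
#         return not_gate(nand_gate(a, b))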
| 33
|
import functools
def min_distance_up_bottom ( worda , wordb ) -> int:
    len_worda = len(worda )
    len_wordb = len(wordb )

    @functools.cache
    def min_distance(indexa , indexb ) -> int:
        # if the first word index overflows - delete all remaining chars of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word index overflows - delete all remaining chars of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )

    return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
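# Worked example: "kitten" -> "sitting" takes 3 edits
# (substitute k->s, substitute e->i, append g).
#
#     >>> min_distance_up_bottom("kitten", "sitting")
#     3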
| 33
| 1
|
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
lowerCamelCase_ = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
lowerCamelCase_ = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
lowerCamelCase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
lowerCamelCase_ = OrderedDict(
[
        # Model for Image classification mapping
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
lowerCamelCase_ = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
lowerCamelCase_ = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
lowerCamelCase_ = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
lowerCamelCase_ = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
lowerCamelCase_ = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
lowerCamelCase_ = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
lowerCamelCase_ = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
lowerCamelCase_ = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
lowerCamelCase_ = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
lowerCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
lowerCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
lowerCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
lowerCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
lowerCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
lowerCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
lowerCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
lowerCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
lowerCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
lowerCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
lowerCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
lowerCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
lowerCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
lowerCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class _UpperCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case = FLAX_MODEL_MAPPING
lowerCamelCase_ = auto_class_update(FlaxAutoModel)
class _UpperCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case = FLAX_MODEL_FOR_PRETRAINING_MAPPING
lowerCamelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class _UpperCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
lowerCamelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class _UpperCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case = FLAX_MODEL_FOR_MASKED_LM_MAPPING
lowerCamelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class _UpperCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class _UpperCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class _UpperCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
lowerCamelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class _UpperCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCamelCase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class _UpperCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
lowerCamelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class _UpperCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
lowerCamelCase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class _UpperCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCamelCase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class _UpperCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCamelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class _UpperCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
lowerCamelCase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
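# Usage sketch: each auto class dispatches on the checkpoint's config type
# through the mappings above (checkpoint id illustrative):
#
#     model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-cased")
#     # resolves to FlaxBertForSequenceClassification via the bert entry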
| 79
|
'''simple docstring'''
def solution ( n = 100 ) -> int:
    '''Return the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers.'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
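# Worked check for n = 10: sum of squares = 385, square of sum = 55**2 = 3025,
# so solution(10) == 3025 - 385 == 2640.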
if __name__ == "__main__":
print(F"""{solution() = }""")
| 79
| 1
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class a__ :
    def __init__( self : List[str] , parent : Tuple , batch_size : Any=2 , is_training : int=True , use_auxiliary_loss : Dict=False , num_queries : List[str]=10 , num_channels : Dict=3 , min_size : Dict=32 * 4 , max_size : int=32 * 6 , num_labels : str=4 , mask_feature_size : Dict=32 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Tuple , a : str ):
"""simple docstring"""
__lowerCamelCase = output.encoder_hidden_states
__lowerCamelCase = output.pixel_decoder_hidden_states
__lowerCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(a ) , config.decoder_config.decoder_layers )
def SCREAMING_SNAKE_CASE__ ( self : int , a : str , a : Optional[int] , a : List[str] , a : Optional[int]=False ):
"""simple docstring"""
with torch.no_grad():
__lowerCamelCase = MaskFormerModel(config=a )
model.to(a )
model.eval()
__lowerCamelCase = model(pixel_values=a , pixel_mask=a )
__lowerCamelCase = model(a , output_hidden_states=a )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(a , a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : Optional[int] , a : List[str] , a : Union[str, Any] , a : List[Any] , a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = MaskFormerForInstanceSegmentation(config=a )
model.to(a )
model.eval()
        def comm_check_on_output(result : Any ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCamelCase = model(pixel_values=a , pixel_mask=a )
__lowerCamelCase = model(a )
comm_check_on_output(a )
__lowerCamelCase = model(
pixel_values=a , pixel_mask=a , mask_labels=a , class_labels=a )
comm_check_on_output(a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
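# Shape sketch for the checks above (defaults from the tester): with
# num_queries=10 and num_labels=4, masks_queries_logits has shape
# (batch, 10, min_size // 4, max_size // 4) because of the encoder's spatial
# compression, and class_queries_logits has shape (batch, 10, 4 + 1) - the
# extra slot is the null (no-object) class.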
@require_torch
class a__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = MaskFormerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=a , has_text_modality=a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(a , **a , output_hidden_states=a )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*a )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(a )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
__lowerCamelCase = MaskFormerModel.from_pretrained(a )
self.assertIsNotNone(a )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = (self.model_tester.min_size,) * 2
__lowerCamelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=a ),
'''mask_labels''': torch.randn((2, 10, *size) , device=a ),
'''class_labels''': torch.zeros(2 , 10 , device=a ).long(),
}
__lowerCamelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(a )
__lowerCamelCase = model(**a )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(a , **a , output_hidden_states=a )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(a ).to(a )
__lowerCamelCase = model(**a , output_attentions=a )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = model_class(a )
model.to(a )
model.train()
__lowerCamelCase = model(a , mask_labels=a , class_labels=a ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(a )
model.to(a )
model.train()
__lowerCamelCase = model(a , mask_labels=a , class_labels=a )
__lowerCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
__lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
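
# Minimal end-to-end sketch mirroring the integration tests above; it assumes
# the same Hub checkpoint and uses the image processor's
# post_process_semantic_segmentation to collapse the per-query mask/class
# logits into a single (height, width) map of class ids.
if __name__ == "__main__":
    image = prepare_img()
    image_processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
    model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
    inputs = image_processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    semantic_map = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    print(semantic_map.shape)  # one class id per pixel at the original resolution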
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}


class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
                 attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1,
                 layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
                 feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512),
                 conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
                 num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False,
                 apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
                 mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
                 num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1,
                 num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1,
                 ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
                 classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1),
                 tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1,
                 eos_token_id=2, num_clusters=504, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
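
# Quick sanity check for the config above: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples raw audio by
# 5 * 2**6 = 320, which is what `inputs_to_logits_ratio` reports -- one logit
# frame per 320 samples, i.e. 20 ms at 16 kHz.
if __name__ == "__main__":
    config = UniSpeechSatConfig()
    assert config.inputs_to_logits_ratio == 320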
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128,
                 pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
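
# Usage sketch for the config above: with `share_encoders=True` RetriBERT uses
# a single BERT encoder for both queries and documents and projects both into
# a `projection_dim`-sized space for dense retrieval.
if __name__ == "__main__":
    config = RetriBertConfig(share_encoders=True, projection_dim=128)
    print(config.hidden_size, config.projection_dim)  # 768 128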
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(self, num_attention_heads: int = 32, attention_head_dim: int = 64, num_layers: int = 20,
                 embedding_dim: int = 768, num_embeddings=77, additional_embeddings=4, dropout: float = 0.0,
                 time_embed_act_fn: str = "silu", norm_in_type: Optional[str] = None,
                 embedding_proj_norm_type: Optional[str] = None, encoder_hid_proj_type: Optional[str] = "linear",
                 added_emb_type: Optional[str] = "prd", time_embed_dim: Optional[int] = None,
                 embedding_proj_dim: Optional[int] = None, clip_embed_dim: Optional[int] = None):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(inner_dim, num_attention_heads, attention_head_dim, dropout=dropout,
                                      activation_fn="gelu", attention_bias=True)
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(self, hidden_states, timestep: Union[torch.Tensor, float, int], proj_embedding: torch.FloatTensor,
                encoder_hidden_states: Optional[torch.FloatTensor] = None,
                attention_mask: Optional[torch.BoolTensor] = None, return_dict: bool = True):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the additional_embeddings and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
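
# Standalone illustration of the additive causal mask built in __init__ above:
# triu_(1) keeps -10000.0 strictly above the diagonal and zeroes the rest, so
# adding the mask to attention scores blocks attention to future positions.
if __name__ == "__main__":
    n = 4
    mask = torch.full([n, n], -10000.0)
    mask.triu_(1)
    print(mask)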
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
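
# Minimal ancestral-sampling sketch with DDPMScheduler, mirroring the loop in
# test_full_loop_no_noise above; the zero "residual" is a stand-in for a
# trained model's noise prediction, so the output is not a meaningful sample.
if __name__ == "__main__":
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    sample = torch.randn(1, 3, 8, 8)
    generator = torch.manual_seed(0)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # placeholder for model(sample, t)
        sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
    print(sample.shape)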
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
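
# Usage sketch for the processor under test: one ClapProcessor call featurizes
# audio and tokenizes text together (checkpoint download and a 48 kHz input
# are assumed here).
if __name__ == "__main__":
    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    audio = floats_list((1, 1000))[0]
    batch = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
    print(sorted(batch.keys()))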
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
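
# Usage sketch (illustrative, not part of the script): with fire, the function
# arguments above map directly to CLI flags, e.g.
#
#   python make_student.py facebook/bart-large-cnn --save_path student_bart --e 6 --d 3
#
# which, per LAYERS_TO_COPY[12], copies encoder layers [0, 2, 4, 7, 9, 11] and
# decoder layers [0, 6, 11] of the 12-layer teacher into the smaller student.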
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_herbert_fast'] = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = 'layoutlmv3'

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128,
                 has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64,
                 max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True,
                 input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
                         num_attention_heads=num_attention_heads, intermediate_size=intermediate_size,
                         hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob,
                         attention_probs_dropout_prob=attention_probs_dropout_prob,
                         max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
                         initializer_range=initializer_range, layer_norm_eps=layer_norm_eps,
                         pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.12')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'sequence'}),
                    ('bbox', {0: 'batch', 1: 'sequence'}),
                    ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ] )
        else:
            return OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'sequence'}),
                    ('bbox', {0: 'batch', 1: 'sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'sequence'}),
                    ('pixel_values', {0: 'batch', 1: 'num_channels'}),
                ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int = -1, seq_length: int = -1,
                              is_pair: bool = False, framework: 'TensorType' = None, num_channels: int = 3,
                              image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        setattr(processor.image_processor, 'apply_ocr', False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)

        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[' '.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
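
# Usage sketch for the config above: LayoutLMv3 expects bounding boxes on a
# 0-1000 grid (hence max_2d_position_embeddings=1024), so pixel coordinates
# are normalized against the page size first; `normalize_bbox` is a
# hypothetical helper written here for illustration.
def normalize_bbox(bbox, width, height):
    return [
        int(1000 * bbox[0] / width),
        int(1000 * bbox[1] / height),
        int(1000 * bbox[2] / width),
        int(1000 * bbox[3] / height),
    ]


if __name__ == "__main__":
    print(normalize_bbox([48, 84, 73, 128], width=612, height=792))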
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the
        # **non-padding** token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
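
# Standalone illustration of the padding-mask trick in _decode_audio above:
# positions where the mask differs from the padding value are kept, so each
# generated sequence is sliced back to its true length (shapes are made up).
if __name__ == "__main__":
    padding_value = 0.0
    audio_values = np.arange(12, dtype=np.float32).reshape(2, 1, 6)  # (bsz, channels, seq_len)
    padding_mask = np.array([[1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1]])  # 0 marks padding
    trimmed = [
        np.asarray(seq)[mask[None, :] != padding_value].reshape(1, -1)
        for seq, mask in zip(audio_values, padding_mask)
    ]
    print([t.shape for t in trimmed])  # [(1, 4), (1, 6)]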
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Tuple ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def UpperCamelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : str ):
snake_case : Optional[Any] = tmp_path / "cache"
snake_case : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case : str = ParquetDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_parquet_dataset(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] ):
snake_case : List[Any] = tmp_path / "cache"
snake_case : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
snake_case : Tuple = features.copy() if features else default_expected_features
snake_case : List[str] = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case : List[str] = ParquetDatasetReader(__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_parquet_dataset(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : int ):
snake_case : Optional[Any] = tmp_path / "cache"
snake_case : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
snake_case : int = ParquetDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase , split=__lowerCamelCase ).read()
_check_parquet_dataset(__lowerCamelCase , __lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def UpperCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] ):
if issubclass(__lowerCamelCase , __lowerCamelCase ):
snake_case : Optional[int] = parquet_path
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
snake_case : Any = [parquet_path]
snake_case : Union[str, Any] = tmp_path / "cache"
snake_case : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
snake_case : int = ParquetDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_parquet_dataset(__lowerCamelCase , __lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any]=("train",) ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
for split in splits:
snake_case : Tuple = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] ):
snake_case : List[str] = tmp_path / "cache"
snake_case : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case : List[Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_parquet_datasetdict(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def UpperCamelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Any ):
if split:
snake_case : str = {split: parquet_path}
else:
snake_case : Optional[Any] = "train"
snake_case : Tuple = {"train": parquet_path, "test": parquet_path}
snake_case : Optional[Any] = tmp_path / "cache"
snake_case : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
snake_case : int = ParquetDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_parquet_datasetdict(__lowerCamelCase , __lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 10
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10
| 1
|
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
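# A hedged illustration of the heuristic above (the exact row group sizes come from
# `datasets.config` and may differ across versions): the smallest limit among the
# visited feature types wins, and value-only schemas fall through to None.
#
#   get_writer_batch_size(Features({"text": Value("string")}))
#   # -> None (no image/audio/binary feature; the writer keeps its default batch size)
#   get_writer_batch_size(Features({"image": Image(), "text": Value("string")}))
#   # -> config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS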
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the pyarrow table as Parquet to a binary file handle. Caller is responsible for opening and closing the handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
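# A minimal round-trip sketch (the file name is illustrative; the reader and writer
# are the classes defined above):
#
#   ds = Dataset.from_dict({"col_1": ["a", "b", "c"]})
#   ParquetDatasetWriter(ds, "data.parquet").write()   # returns the number of bytes written
#   reloaded = ParquetDatasetReader("data.parquet").read()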
| 237
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 237
| 1
|
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def _snake_case ( _SCREAMING_SNAKE_CASE : str = "" ) -> dict[str, float]:
"""simple docstring"""
lowerCAmelCase = url or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
lowerCAmelCase = BeautifulSoup(requests.get(a__ ).text , """html.parser""" )
lowerCAmelCase = soup.find_all("""td""" , attrs="""titleColumn""" )
lowerCAmelCase = soup.find_all("""td""" , class_="""ratingColumn imdbRating""" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(a__ , a__ )
}
def _snake_case ( _SCREAMING_SNAKE_CASE : str = "IMDb_Top_250_Movies.csv" ) -> None:
"""simple docstring"""
lowerCAmelCase = get_imdb_top_aaa_movies()
with open(a__ , """w""" , newline="""""" ) as out_file:
lowerCAmelCase = csv.writer(a__ )
writer.writerow(["""Movie title""", """IMDb rating"""] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 360
|
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 187
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 104
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta: float = 0.0, num_inference_steps: int = 50, use_clipped_model_output: Optional[bool] = None, output_type: Optional[str] = "pil", return_dict: bool = True) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
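# A minimal usage sketch, assuming an unconditional checkpoint with a compatible
# UNet and scheduler config (the model id below is illustrative):
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images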
| 314
| 0
|
from math import sqrt
def solution(limit: int = 1_00_00_00) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F'''{solution() = }''')
| 359
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
    def _compute(self, predictions, references, char_order=CHRF.CHAR_ORDER, word_order=CHRF.WORD_ORDER, beta=CHRF.BETA, lowercase=False, whitespace=False, eps_smoothing=False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 288
| 0
|
'''simple docstring'''
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
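# Quick sanity check of the hole/count bookkeeping above:
#   pigeon_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]
#   pigeon_sort([]) -> []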
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n')
    unsorted = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 145
|
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
CORRECT_DICT = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_CORRECT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
README_CORRECT_FOUR_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
CORRECT_DICT_FOUR_LEVEL = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_EMPTY_YAML = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_EMPTY_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
README_NO_YAML = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_NO_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
README_INCORRECT_YAML = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_INCORRECT_YAML = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
README_MISSING_TEXT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_TEXT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
README_NONE_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
EXPECTED_ERROR_README_NONE_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
README_MISSING_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
README_MISSING_CONTENT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
EXPECTED_ERROR_README_MISSING_CONTENT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
README_MISSING_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
README_MULTIPLE_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
README_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
README_EMPTY = ''
EXPECTED_ERROR_README_EMPTY = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
README_MULTIPLE_SAME_HEADING_1 = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
"readme_md, expected_dict", [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
], )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
], )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
], )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md,", [
(README_MULTIPLE_SAME_HEADING_1),
], )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
"readme_md, expected_dict", [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
], )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
], )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
], )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md,", [
(README_MULTIPLE_SAME_HEADING_1),
], )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 145
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase_ = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2_048, mask_ratio=0.75, norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 11
| 0
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =tmp_path / "cache"
lowerCamelCase__: Dict ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Dict:
"""simple docstring"""
if issubclass(__a , __a ):
lowerCamelCase__: str =parquet_path
elif issubclass(__a , __a ):
lowerCamelCase__: str =[parquet_path]
lowerCamelCase__: Optional[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
lowerCamelCase__: Optional[Any] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: str ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: List[str] =ParquetDatasetReader(
{"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
if split:
lowerCamelCase__: Union[str, Any] ={split: parquet_path}
else:
lowerCamelCase__: int ="train"
lowerCamelCase__: Union[str, Any] ={"train": parquet_path, "test": parquet_path}
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Union[str, Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 10
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
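# A hedged usage sketch: `align_with_features` swaps the generic ClassLabel in
# `label_schema` for the dataset's own label feature (the features below are illustrative):
#
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = ImageClassification(image_column="image", label_column="labels")
#   task = task.align_with_features(features)  # label_schema now carries the 2-class ClassLabel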
| 10
| 1
|
"""simple docstring"""
def solution(pence: int = 2_00) -> int:
    coins = [1, 2, 5, 10, 20, 50, 1_00, 2_00]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
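# Worked example: for pence=5 and coins {1, 2, 5} the table counts
# 1+1+1+1+1, 1+1+1+2, 1+2+2 and 5, so solution(5) == 4.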
if __name__ == "__main__":
assert solution(200) == 73682
| 359
|
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 40
| 0
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def check_model_from_pretrained_configs(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1e-3 )
    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def check_pt_flax_equivalence( self , pt_model , fx_model , inputs_dict ):
        pt_model.to(torch_device )
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs ).to_tuple()
        fx_outputs = fx_model(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , 'Output lengths differ between Flax and PyTorch' )
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output , pt_output.numpy() , 4e-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname )
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname , from_pt=True )
            fx_outputs_loaded = fx_model_loaded(**inputs_dict ).to_tuple()
            self.assertEqual(len(fx_outputs_loaded ) , len(pt_outputs ) , 'Output lengths differ between Flax and PyTorch' )
            for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
                self.assert_almost_equals(fx_output_loaded , pt_output.numpy() , 4e-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname )
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname , from_flax=True )
            pt_model_loaded.to(torch_device )
            pt_model_loaded.eval()
            with torch.no_grad():
                pt_outputs_loaded = pt_model_loaded(**pt_inputs ).to_tuple()
            self.assertEqual(len(fx_outputs ) , len(pt_outputs_loaded ) , 'Output lengths differ between Flax and PyTorch' )
            for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
                self.assert_almost_equals(fx_output , pt_output_loaded.numpy() , 4e-2 )
    def check_equivalence_pt_to_flax( self , vision_config , text_config , inputs_dict ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model )
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )
    def check_equivalence_flax_to_pt( self , vision_config , text_config , inputs_dict ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )
        pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params )
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )
    def test_model_from_pretrained_configs( self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict )
    def test_vision_text_dual_encoder_from_pretrained( self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict )
    def test_save_load( self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict )
    def test_vision_text_output_attention( self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict )
@is_pt_flax_cross_test
    def test_pt_flax_equivalence( self ):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop('vision_config' )
        text_config = config_inputs_dict.pop('text_config' )
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config , text_config , inputs_dict )
        self.check_equivalence_flax_to_pt(vision_config , text_config , inputs_dict )
@slow
    def test_real_model_save_load_from_pretrained( self ):
        model_1 , inputs = self.get_pretrained_model_and_inputs()
        outputs = model_1(**inputs )
        out_1 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_1.save_pretrained(tmp_dirname )
            model_2 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model_2(**inputs )
            out_2 = after_outputs[0]
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1e-5 )
@require_flax
class FlaxViTBertModelTest( VisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        vision_model = FlaxViTModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        vit_model_tester = FlaxViTModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class FlaxCLIPVisionBertModelTest( VisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        vision_model = FlaxCLIPVisionModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        clip_model_tester = FlaxCLIPVisionModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest( unittest.TestCase ):
@slow
    def test_inference( self ):
        model = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'] , images=image , padding=True , return_tensors='np' )
        outputs = model(**inputs )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1e-3 ) )
| 50
|
from torch import nn
def lowerCamelCase__ ( act_fn ):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"Unsupported activation function: {act_fn}" )
| 187
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
__lowerCAmelCase : Any ={
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class _A ( lowerCAmelCase ):
snake_case__ : List[str] = 'tapas'
def __init__( self , __lowerCAmelCase=3_0522 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=1024 , __lowerCAmelCase=[3, 256, 256, 2, 256, 256, 10] , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=0 , __lowerCAmelCase=10.0 , __lowerCAmelCase=0 , __lowerCAmelCase=1.0 , __lowerCAmelCase=None , __lowerCAmelCase=1.0 , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=1.0 , __lowerCAmelCase=1.0 , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase="ratio" , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=64 , __lowerCAmelCase=32 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_sizes
lowercase = initializer_range
lowercase = layer_norm_eps
# Fine-tuning task hyperparameters
lowercase = positive_label_weight
lowercase = num_aggregation_labels
lowercase = aggregation_loss_weight
lowercase = use_answer_as_supervision
lowercase = answer_loss_importance
lowercase = use_normalized_answer_loss
lowercase = huber_loss_delta
lowercase = temperature
lowercase = aggregation_temperature
lowercase = use_gumbel_for_cells
lowercase = use_gumbel_for_aggregation
lowercase = average_approximation_function
lowercase = cell_selection_preference
lowercase = answer_loss_cutoff
lowercase = max_num_rows
lowercase = max_num_columns
lowercase = average_logits_per_cell
lowercase = select_one_column
lowercase = allow_empty_column_selection
lowercase = init_cell_selection_weights_to_zero
lowercase = reset_position_index_per_cell
lowercase = disable_per_token_loss
# Aggregation hyperparameters
lowercase = aggregation_labels
lowercase = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            lowercase = {int(k ): v for k, v in aggregation_labels.items()}
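
# Added illustration of the key-normalization idiom above: JSON round-trips
# turn integer dict keys into strings, so they are cast back to int when a
# dict is supplied for `aggregation_labels`.
assert {int(k): v for k, v in {'0': 'NONE', '1': 'SUM'}.items()} == {0: 'NONE', 1: 'SUM'}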
| 358
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class _A ( lowerCAmelCase ):
snake_case__ : Optional[float] = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
snake_case__ : bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'whether to use adafactor'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(default=lowerCAmelCase , metadata={'help': 'Dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
snake_case__ : Optional[str] = field(
default='linear' , metadata={'help': F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 32
| 0
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
A_ = "path-to-your-trained-model"
A_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
A_ = "A photo of sks dog in a bucket"
A_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 139
|
"""simple docstring"""
def solution( limit : int = 1_00_00_00 ) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ): # since x, y, z are positive integers
                    frequency[n] += 1 # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
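
# Added derivation note: writing the progression as x, y, z = a + d, a, a - d
# turns x**2 - y**2 - z**2 = n into n = a * (4*d - a), so for each a
# (= first_term) the loop walks the multiples n of a and recovers
# d = (a + n / a) / 4; the divisibility check keeps d integral, and
# a > d together with a < 4 * d keeps all three terms positive. Per the
# problem statement, n = 1155 is the least value with exactly ten solutions.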
if __name__ == "__main__":
print(F"{solution() = }")
| 288
| 0
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(A__ ) , "Tatoeba directory does not exist." )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self : str ) -> Optional[int]:
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
def lowercase_ ( self : List[str] ) -> Union[str, Any]:
self.resolver.convert_models(['''heb-eng'''] )
@slow
def lowercase_ ( self : Dict ) -> List[str]:
        model_card, mmeta = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=True )
assert mmeta["long_pair"] == "heb-eng"
| 218
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = ["input_features"]
def __init__( self : Dict , __lowerCamelCase : Tuple=80 , __lowerCamelCase : List[Any]=1_6000 , __lowerCamelCase : Optional[int]=160 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : List[Any]=400 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : str=False , **__lowerCamelCase : List[str] , ) -> Any:
super().__init__(
feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = n_fft
SCREAMING_SNAKE_CASE__ = hop_length
SCREAMING_SNAKE_CASE__ = chunk_length
SCREAMING_SNAKE_CASE__ = chunk_length * sampling_rate
SCREAMING_SNAKE_CASE__ = self.n_samples // hop_length
SCREAMING_SNAKE_CASE__ = sampling_rate
SCREAMING_SNAKE_CASE__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCamelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__lowerCamelCase , norm='''slaney''' , mel_scale='''slaney''' , )
def lowercase_ ( self : int , __lowerCamelCase : np.array ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ = spectrogram(
__lowerCamelCase , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
SCREAMING_SNAKE_CASE__ = log_spec[:, :-1]
SCREAMING_SNAKE_CASE__ = np.maximum(__lowerCamelCase , log_spec.max() - 8.0 )
SCREAMING_SNAKE_CASE__ = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowercase_ ( __lowerCamelCase : List[np.ndarray] , __lowerCamelCase : List[np.ndarray] , __lowerCamelCase : float = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
SCREAMING_SNAKE_CASE__ = np.array(__lowerCamelCase , np.intaa )
SCREAMING_SNAKE_CASE__ = []
for vector, length in zip(__lowerCamelCase , attention_mask.sum(-1 ) ):
SCREAMING_SNAKE_CASE__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
SCREAMING_SNAKE_CASE__ = padding_value
normed_input_values.append(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : List[str] , __lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[str] = "max_length" , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , **__lowerCamelCase : List[str] , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE__ = isinstance(__lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
SCREAMING_SNAKE_CASE__ = is_batched_numpy or (
isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE__ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCamelCase , np.ndarray ):
SCREAMING_SNAKE_CASE__ = np.asarray(__lowerCamelCase , dtype=np.floataa )
elif isinstance(__lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE__ = [np.asarray([raw_speech] ).T]
SCREAMING_SNAKE_CASE__ = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
SCREAMING_SNAKE_CASE__ = self.pad(
__lowerCamelCase , padding=__lowerCamelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
SCREAMING_SNAKE_CASE__ = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
SCREAMING_SNAKE_CASE__ = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
SCREAMING_SNAKE_CASE__ = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
SCREAMING_SNAKE_CASE__ = [self._np_extract_fbank_features(__lowerCamelCase ) for waveform in input_features[0]]
if isinstance(input_features[0] , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = [np.asarray(__lowerCamelCase , dtype=np.floataa ) for feature in input_features]
else:
SCREAMING_SNAKE_CASE__ = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
SCREAMING_SNAKE_CASE__ = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
SCREAMING_SNAKE_CASE__ = padded_inputs.convert_to_tensors(__lowerCamelCase )
return padded_inputs
def lowercase_ ( self : str ) -> Dict[str, Any]:
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
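
# Hedged usage sketch (added; the scrambled class name `UpperCAmelCase__` and
# the defaults are read off the signature above: 80 mel bins, 16 kHz, 30 s chunks):
# extractor = UpperCAmelCase__()
# audio = np.zeros(16_000, dtype=np.float32)  # 1 s of silence at 16 kHz
# feats = extractor(audio, sampling_rate=16_000, return_tensors="np")
# feats["input_features"].shape               # -> (1, 80, 3000)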
| 218
| 1
|
'''simple docstring'''
from math import factorial
def solution( n : int = 20 ) -> int:
    '''simple docstring'''
    n = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
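
# Added sanity checks: the central binomial coefficients C(2, 1) = 2 and
# C(4, 2) = 6; solution(20) gives the classic 20x20 lattice-path count.
assert solution(1) == 2 and solution(2) == 6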
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
a__ : List[Any] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 80
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase)
| 11
| 0
|
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__( self ):
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor( self ):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start( self ):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor )
        self.thread.daemon = True
        self.thread.start()
    def stop( self ):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # Time
    measures = {'time': time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure( start_measures ):
    # Time
    measures = {'time': time.time() - start_measures['time']}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
    measures['cpu-peak'] = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        measures[f"""{i}-peak"""] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
    return measures
def log_measures( measures , description ):
    print(f"""{description}:""" )
    print(f"""- Time: {measures['time']:.2f}s""" )
    for i in range(torch.cuda.device_count() ):
        print(f"""- GPU {i} allocated: {measures[str(i )]:.2f}MiB""" )
        peak = measures[f"""{i}-peak"""]
        print(f"""- GPU {i} peak: {peak:.2f}MiB""" )
    print(f"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
    print(f"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" )
| 107
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
set_seed(770)
__lowerCAmelCase = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
__lowerCAmelCase = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
__lowerCAmelCase = os.path.dirname(os.path.abspath(__file__))
__lowerCAmelCase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
__lowerCAmelCase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False ) -> Optional[int]:
_a : int = model_type
if use_small:
key += "_small"
return os.path.join(lowerCAmelCase_ , REMOTE_MODEL_PATHS[key]['file_name'] )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
hf_hub_download(repo_id=lowerCAmelCase_ , filename=lowerCAmelCase_ , local_dir=lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_="text" ) -> List[str]:
if model_type == "text":
_a : List[str] = BarkSemanticModel
_a : Optional[Any] = BarkSemanticConfig
_a : Any = BarkSemanticGenerationConfig
elif model_type == "coarse":
_a : Tuple = BarkCoarseModel
_a : str = BarkCoarseConfig
_a : str = BarkCoarseGenerationConfig
elif model_type == "fine":
_a : List[str] = BarkFineModel
_a : Optional[Any] = BarkFineConfig
_a : str = BarkFineGenerationConfig
else:
raise NotImplementedError()
_a : Dict = f"""{model_type}_small""" if use_small else model_type
_a : Union[str, Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowerCAmelCase_ ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info['repo_id'] , model_info['file_name'] )
_a : int = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
# this is a hack
_a : List[Any] = checkpoint['model_args']
if "input_vocab_size" not in model_args:
_a : Dict = model_args['vocab_size']
_a : Dict = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_a : List[Any] = model_args.pop('n_head' )
_a : Any = model_args.pop('n_embd' )
_a : List[Any] = model_args.pop('n_layer' )
_a : Optional[int] = ConfigClass(**checkpoint['model_args'] )
_a : List[str] = ModelClass(config=lowerCAmelCase_ )
_a : Tuple = GenerationConfigClass()
_a : Optional[Any] = model_generation_config
_a : Optional[Any] = checkpoint['model']
# fixup checkpoint
_a : int = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(lowerCAmelCase_ ):
# replace part of the key with corresponding layer name in HF implementation
_a : str = k[len(lowerCAmelCase_ ) :]
for old_layer_name in new_layer_name_dict:
_a : List[Any] = new_k.replace(lowerCAmelCase_ , new_layer_name_dict[old_layer_name] )
_a : List[Any] = state_dict.pop(lowerCAmelCase_ )
_a : List[Any] = set(state_dict.keys() ) - set(model.state_dict().keys() )
_a : Tuple = {k for k in extra_keys if not k.endswith('.attn.bias' )}
_a : Tuple = set(model.state_dict().keys() ) - set(state_dict.keys() )
_a : Optional[Any] = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(lowerCAmelCase_ ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(lowerCAmelCase_ ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
_a : Dict = model.num_parameters(exclude_embeddings=lowerCAmelCase_ )
_a : Tuple = checkpoint['best_val_loss'].item()
logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowerCAmelCase_ , 3 )} loss""" )
model.eval()
model.to(lowerCAmelCase_ )
del checkpoint, state_dict
return model
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_="text" ) -> List[Any]:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_a : Optional[int] = 'cpu' # do conversion on cpu
_a : Tuple = _get_ckpt_path(lowerCAmelCase_ , use_small=lowerCAmelCase_ )
_a : List[Any] = _load_model(lowerCAmelCase_ , lowerCAmelCase_ , model_type=lowerCAmelCase_ , use_small=lowerCAmelCase_ )
# load bark initial model
_a : Any = _bark_load_model(lowerCAmelCase_ , 'cpu' , model_type=lowerCAmelCase_ , use_small=lowerCAmelCase_ )
if model_type == "text":
_a : int = bark_model['model']
if model.num_parameters(exclude_embeddings=lowerCAmelCase_ ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
_a : Any = 5
_a : List[str] = 10
if model_type in ["text", "coarse"]:
_a : Dict = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
_a : Dict = bark_model(lowerCAmelCase_ )[0]
_a : Tuple = model(lowerCAmelCase_ )
# take last logits
_a : Optional[int] = output_new_model_total.logits[:, [-1], :]
else:
_a : List[str] = 3
_a : List[Any] = 8
_a : Tuple = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
_a : Union[str, Any] = model(lowerCAmelCase_ , lowerCAmelCase_ )
_a : int = bark_model(lowerCAmelCase_ , lowerCAmelCase_ )
_a : List[str] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('initial and new outputs are not equal' )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> Any:
_a : Any = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
_a : int = BarkSemanticConfig.from_pretrained(os.path.join(lowerCAmelCase_ , 'config.json' ) )
_a : Any = BarkCoarseConfig.from_pretrained(os.path.join(lowerCAmelCase_ , 'config.json' ) )
_a : List[Any] = BarkFineConfig.from_pretrained(os.path.join(lowerCAmelCase_ , 'config.json' ) )
_a : List[str] = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
_a : str = BarkSemanticModel.from_pretrained(lowerCAmelCase_ )
_a : Dict = BarkCoarseModel.from_pretrained(lowerCAmelCase_ )
_a : int = BarkFineModel.from_pretrained(lowerCAmelCase_ )
_a : List[Any] = EncodecModel.from_pretrained('facebook/encodec_24khz' )
_a : Any = BarkConfig.from_sub_model_configs(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_a : List[str] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
_a : Optional[Any] = BarkModel(lowerCAmelCase_ )
_a : List[str] = semantic
_a : Union[str, Any] = coarseAcoustic
_a : Optional[int] = fineAcoustic
_a : Optional[Any] = codec
_a : List[Any] = bark_generation_config
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
bark.save_pretrained(lowerCAmelCase_ , repo_id=lowerCAmelCase_ , push_to_hub=lowerCAmelCase_ )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
__lowerCAmelCase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 107
| 1
|
def add ( first: int , second: int ) -> int:
    '''simple docstring'''
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
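
# Added worked trace for add(5, 3); the carry shifts left each pass until it
# falls out of the overlap:
#   first=0b0101, second=0b0011 -> carry=0b0001, first=0b0110, second=0b0010
#   first=0b0110, second=0b0010 -> carry=0b0010, first=0b0100, second=0b0100
#   first=0b0100, second=0b0100 -> carry=0b0100, first=0b0000, second=0b1000
#   first=0b1000, second=0b0000 -> loop exits, returning 8
assert add(5, 3) == 8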
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase = int(input('''Enter the first number: ''').strip())
__lowercase = int(input('''Enter the second number: ''').strip())
print(F'{add(first, second) = }')
| 43
|
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : str = LayoutLMTokenizer
UpperCAmelCase : int = LayoutLMTokenizerFast
UpperCAmelCase : Union[str, Any] = True
UpperCAmelCase : Optional[Any] = True
def __snake_case ( self : Optional[int]):
super().setUp()
a : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str):
a : Tuple = "UNwant\u00E9d,running"
a : Dict = "unwanted, running"
return input_text, output_text
def __snake_case ( self : Any):
a : List[Any] = self.tokenizer_class(self.vocab_file)
a : str = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9])
def __snake_case ( self : Dict):
pass
| 40
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
a_ = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=__UpperCAmelCase , cache_dir=__UpperCAmelCase)
a_ = [t[-1] for t in os.walk(os.path.join(__UpperCAmelCase , os.listdir(__UpperCAmelCase)[0] , "snapshots"))]
a_ = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Tuple:
a_ , a_ = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=__UpperCAmelCase)
a_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ = jax.random.PRNGKey(0)
a_ = 4
a_ = jax.device_count()
a_ = num_samples * [prompt]
a_ = pipeline.prepare_inputs(__UpperCAmelCase)
# shard inputs and rng
a_ = replicate(__UpperCAmelCase)
a_ = jax.random.split(__UpperCAmelCase , __UpperCAmelCase)
a_ = shard(__UpperCAmelCase)
a_ = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1_514_745) < 1E-3
assert np.abs(np.abs(__UpperCAmelCase , dtype=np.floataa).sum() - 49_947.875) < 5E-1
a_ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(__UpperCAmelCase) == num_samples
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ , a_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=__UpperCAmelCase)
a_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ = jax.random.PRNGKey(0)
a_ = 50
a_ = jax.device_count()
a_ = num_samples * [prompt]
a_ = pipeline.prepare_inputs(__UpperCAmelCase)
# shard inputs and rng
a_ = replicate(__UpperCAmelCase)
a_ = jax.random.split(__UpperCAmelCase , __UpperCAmelCase)
a_ = shard(__UpperCAmelCase)
a_ = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05_652_401)) < 1E-3
assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa).sum() - 2_383_808.2)) < 5E-1
def UpperCAmelCase__ ( self) ->str:
a_ , a_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase)
a_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ = jax.random.PRNGKey(0)
a_ = 50
a_ = jax.device_count()
a_ = num_samples * [prompt]
a_ = pipeline.prepare_inputs(__UpperCAmelCase)
# shard inputs and rng
a_ = replicate(__UpperCAmelCase)
a_ = jax.random.split(__UpperCAmelCase , __UpperCAmelCase)
a_ = shard(__UpperCAmelCase)
a_ = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_003_906)) < 1E-3
assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa).sum() - 2_373_516.75)) < 5E-1
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ , a_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa)
a_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ = jax.random.PRNGKey(0)
a_ = 50
a_ = jax.device_count()
a_ = num_samples * [prompt]
a_ = pipeline.prepare_inputs(__UpperCAmelCase)
# shard inputs and rng
a_ = replicate(__UpperCAmelCase)
a_ = jax.random.split(__UpperCAmelCase , __UpperCAmelCase)
a_ = shard(__UpperCAmelCase)
a_ = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_003_906)) < 1E-3
assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa).sum() - 2_373_516.75)) < 5E-1
def UpperCAmelCase__ ( self) ->Tuple:
a_ = FlaxDDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , )
a_ , a_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , )
a_ = scheduler.create_state()
a_ = scheduler_state
a_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ = jax.random.PRNGKey(0)
a_ = 50
a_ = jax.device_count()
a_ = num_samples * [prompt]
a_ = pipeline.prepare_inputs(__UpperCAmelCase)
# shard inputs and rng
a_ = replicate(__UpperCAmelCase)
a_ = jax.random.split(__UpperCAmelCase , __UpperCAmelCase)
a_ = shard(__UpperCAmelCase)
a_ = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.045_043_945)) < 1E-3
assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa).sum() - 2_347_693.5)) < 5E-1
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ = jax.device_count()
a_ = num_samples * [prompt]
a_ = jax.random.split(jax.random.PRNGKey(0) , __UpperCAmelCase)
a_ , a_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase , )
a_ = replicate(__UpperCAmelCase)
a_ = pipeline.prepare_inputs(__UpperCAmelCase)
a_ = shard(__UpperCAmelCase)
a_ = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
a_ = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
a_ , a_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase , use_memory_efficient_attention=__UpperCAmelCase , )
a_ = replicate(__UpperCAmelCase)
a_ = pipeline.prepare_inputs(__UpperCAmelCase)
a_ = shard(__UpperCAmelCase)
a_ = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
a_ = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1E-2
| 303
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 1
|
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
__a: Optional[int] = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def __UpperCamelCase ( UpperCAmelCase=True ):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=lowercase__ ) )
class UpperCAmelCase ( lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
with TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = dataset_module_factory(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ )
lowercase__ : Optional[int] = import_main_class(dataset_module.module_path , dataset=SCREAMING_SNAKE_CASE__ )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=SCREAMING_SNAKE_CASE__ , config_name=SCREAMING_SNAKE_CASE__ , hash=dataset_module.hash , )
lowercase__ : Any = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=SCREAMING_SNAKE_CASE__ ).replace(os.sep , '''/''' ),
config.DATASET_INFO_FILENAME,
] )
lowercase__ : Dict = cached_path(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ )
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE__ ) )
@pytest.mark.integration
def __UpperCamelCase ( UpperCAmelCase ):
lowercase__ : List[str] = tmp_path_factory.mktemp('''test_hf_gcp''' ) / 'test_wikipedia_simple'
lowercase__ : Union[str, Any] = dataset_module_factory('''wikipedia''' , cache_dir=__A )
lowercase__ : Dict = import_main_class(dataset_module.module_path )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=__A , config_name='''20220301.frr''' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowercase__ : Optional[Any] = None
builder_instance.download_and_prepare()
lowercase__ : List[Any] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __UpperCamelCase ( UpperCAmelCase ):
lowercase__ : Optional[int] = dataset_module_factory('''wikipedia''' , cache_dir=__A )
lowercase__ : Any = import_main_class(dataset_module.module_path , dataset=__A )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=__A , config_name='''20220301.frr''' , hash=dataset_module.hash , )
lowercase__ : Tuple = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(__A , __A )
assert "train" in ds
assert isinstance(ds['''train'''] , __A )
assert next(iter(ds['''train'''] ) )
| 198
|
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def get_week_day( year : int , month : int , day : int ) -> str:
    """simple docstring"""
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 1_00
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 1_00
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
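
# Added example: 13 August 2017 fell on a Sunday.
assert get_week_day(2017, 8, 13) == "Sunday"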
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32
| 0
|
import inspect
import unittest
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> int:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = 'k-diffusion'
                    elif backend == "invisible_watermark":
                        backend = 'invisible-watermark'
                    assert backend in deps, f'''{backend} is not in the deps table!'''
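
# Added note: the dummy placeholder classes generated by diffusers live in
# modules named `dummy_*` and record the pip extras they require in
# `_backends`; the renames above map import-style backend names onto the
# PyPI package names that the dependency table is keyed by.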
| 367
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : Optional[Any] = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_efficientnet'] = [
        'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EfficientNetForImageClassification',
        'EfficientNetModel',
        'EfficientNetPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
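# Usage sketch (illustrative; mirrors how other transformers subpackages behave):
# the _LazyModule registered above defers the torch-dependent imports until an
# attribute is first touched, so
#   from transformers import EfficientNetConfig, EfficientNetModel
#   model = EfficientNetModel(EfficientNetConfig())
# only pays the import cost when the model class is actually used.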
| 327
| 0
|
def binary_recursive(decimal: int) -> str:
    """Recursively convert a non-negative integer to its binary digit string."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input, then prepend the sign and the '0b' prefix."""
    number = str(number).strip()
    if not number:
        raise ValueError('No input value was provided')
    negative = '-' if number.startswith('-') else ''
    number = number.lstrip('-')
    if not number.isnumeric():
        raise ValueError('Input value is not an integer')
    return f'{negative}0b{binary_recursive(int(number))}'
if __name__ == "__main__":
from doctest import testmod
testmod()
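# Example (illustrative): binary_recursive(12) unwinds 12 -> (6, 0) -> (3, 0) -> (1, 1)
# and returns '1100', so main('-12') yields '-0b1100' with the sign preserved.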
| 218
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create the GLUE MRPC train/eval dataloaders with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train and evaluate BERT on MRPC with LocalSGD parameter synchronization."""
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # LocalSGD-specific line
                local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
def main():
    """Parse CLI arguments and kick off training."""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help='The number of minibatches to be ran before gradients are accumulated.',
    )
    parser.add_argument(
        '--local_sgd_steps', type=int, default=8, help='Number of local SGD steps or None to disable local SGD'
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
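# Launch sketch (illustrative; exact flags depend on your accelerate setup):
#   accelerate launch local_sgd.py --gradient_accumulation_steps 2 --local_sgd_steps 8
# LocalSGD only synchronizes parameters across workers every `local_sgd_steps`
# optimizer steps, so it cuts communication frequency, whereas gradient
# accumulation cuts optimizer-step frequency -- the two compose.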
| 218
| 1
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version() -> None:
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
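# Shape sanity check (illustrative, not part of the original file): a 3x32x64
# image split into 16x16 patches yields a (1, 2, 4, 768) grid, since each patch
# flattens to 3 * 16 * 16 = 768 values:
#   patches = torch_extract_patches(torch.randn(3, 32, 64), 16, 16)
#   assert patches.shape == (1, 2, 4, 768)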
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with its own mean and (adjusted) standard deviation."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
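# Usage sketch (illustrative, not part of the original file):
#   processor = Pix2StructImageProcessor()
#   out = processor.preprocess(images=pil_image, return_tensors="np")
# "flattened_patches" then has shape (1, max_patches, 2 + 16 * 16 * 3): two
# leading columns carry the 1-based row/column ids and the rest the patch
# pixels, while "attention_mask" is 0 on the zero-padded patch rows.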
| 360
|
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
    import transformers


def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model
    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
        """Return the state of the ExponentialMovingAverage as a dict."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }
    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 163
| 0
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
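# Usage sketch (assumption, based on the PABEE run scripts rather than this
# file): at evaluation time one calls model.bert.set_patience(p) so inference
# exits as soon as p consecutive internal classifiers agree on a prediction,
# and model.bert.log_stats() then prints the measured layer-level speed-up.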
| 107
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=self.is_encoder_decoder,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
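# Note (illustrative): tokenizer.padding_side = "left" matters for decoder-only
# generation -- with right padding the model would condition on pad tokens when
# continuing the shorter prompt, which is exactly what the batched-vs-single
# comparison in test_batch_generation guards against.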
| 107
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_trajectory_transformer': [
        'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TrajectoryTransformerConfig',
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_trajectory_transformer'] = [
        'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrajectoryTransformerModel',
        'TrajectoryTransformerPreTrainedModel',
        'load_tf_weights_in_trajectory_transformer',
    ]
if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 248
|
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Implementation of the simulated annealing search algorithm."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("""Iterations""")
        plt.ylabel("""Function values""")
        plt.show()

    return best_state
if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        F'{local_min.score()}'
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        F'{local_min.score()}'
    )
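# Acceptance-probability sketch (illustrative): a worsening move with change = -3
# is kept with probability e ** (-3 / 10) ~ 0.74 while current_temp is 10, but
# only e ** (-3 / 1) ~ 0.05 once the temperature has decayed to 1, which is how
# the search gradually settles into a (local) optimum.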
| 248
| 1
|
import random

import torch
from huggingface_hub import HfApi

from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
lowercase_ = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
lowercase_ = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
lowercase_ = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
lowercase_ = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
lowercase_ = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
lowercase_ = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
lowercase_ = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
lowercase_ = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
lowercase_ = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
lowercase_ = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
lowercase_ = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
lowercase_ = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
lowercase_ = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
lowercase_ = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
lowercase_ = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
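# Note (illustrative): the expected-logits tensors above are looked up by model
# id with "/" and "-" replaced by "_", e.g. "google/ddpm-cifar10-32" maps to
# results["google_ddpm_cifar10_32"]; the exact tensor-to-key assignments were
# lost in this dump and are left as-is above rather than guessed.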
| 303
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    r"""
    Pipeline for class-conditional image generation with DiT; inherits from [`DiffusionPipeline`].
    """

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Dict , _A : List[int] , _A : float = 4.0 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : int = 50 , _A : Optional[str] = "pil" , _A : bool = True , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = len(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.transformer.config.sample_size
__SCREAMING_SNAKE_CASE : List[Any] = self.transformer.config.in_channels
__SCREAMING_SNAKE_CASE : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , )
__SCREAMING_SNAKE_CASE : Tuple = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(_A , device=self.device ).reshape(-1 )
__SCREAMING_SNAKE_CASE : Any = torch.tensor([1000] * batch_size , device=self.device )
__SCREAMING_SNAKE_CASE : Any = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE : Optional[Any] = latent_model_input[: len(_A ) // 2]
__SCREAMING_SNAKE_CASE : List[Any] = torch.cat([half, half] , dim=0 )
__SCREAMING_SNAKE_CASE : int = self.scheduler.scale_model_input(_A , _A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = t
if not torch.is_tensor(_A ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__SCREAMING_SNAKE_CASE : Any = latent_model_input.device.type == '''mps'''
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : List[Any] = torch.floataa if is_mps else torch.floataa
else:
__SCREAMING_SNAKE_CASE : int = torch.intaa if is_mps else torch.intaa
__SCREAMING_SNAKE_CASE : int = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__SCREAMING_SNAKE_CASE : Optional[int] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.transformer(
_A , timestep=_A , class_labels=_A ).sample
# perform guidance
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = torch.split(_A , len(_A ) // 2 , dim=0 )
__SCREAMING_SNAKE_CASE : str = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__SCREAMING_SNAKE_CASE : List[Any] = torch.cat([half_eps, half_eps] , dim=0 )
__SCREAMING_SNAKE_CASE : List[str] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = torch.split(_A , _A , dim=1 )
else:
__SCREAMING_SNAKE_CASE : List[Any] = noise_pred
# compute previous image: x_t -> x_t-1
__SCREAMING_SNAKE_CASE : str = self.scheduler.step(_A , _A , _A ).prev_sample
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = latent_model_input.chunk(2 , dim=0 )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = latent_model_input
__SCREAMING_SNAKE_CASE : List[Any] = 1 / self.vae.config.scaling_factor * latents
__SCREAMING_SNAKE_CASE : List[str] = self.vae.decode(_A ).sample
__SCREAMING_SNAKE_CASE : Any = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE : int = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE : str = self.numpy_to_pil(_A )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_A )
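# A minimal usage sketch for the pipeline above (assumes the public
# "facebook/DiT-XL-2-256" checkpoint and a CUDA device; adjust for your setup):
#
#   from diffusers import DiTPipeline, DPMSolverMultistepScheduler
#   import torch
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=class_ids, num_inference_steps=25).images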
| 303
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCamelCase : Any = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
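# With this lazy-module pattern, importing the package is cheap: the heavy torch/TF
# submodules are only imported on first attribute access. A quick sketch:
#   from transformers import TapasConfig, TapasTokenizer  # resolved lazily via _LazyModule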
| 365
|
"""simple docstring"""
def snake_case (A_ :int ):
'''simple docstring'''
if isinstance(A_ , A_ ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if isinstance(A_ , A_ ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
a : List[Any] = False
if num < 0:
a : Optional[int] = True
a : Dict = -num
a : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(A_ ) for e in binary )
return "0b" + "".join(str(A_ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
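# Round-trip sketch: Python's built-in int() can parse the output back, e.g.
#   int(decimal_to_binary(37), 2) == 37
# and the built-in bin(37) produces the same "0b100101" string.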
| 186
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class SimpleImageProcessor(BaseImageProcessor):
    # NOTE: generic class name used as a placeholder for the concrete model-specific processor.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
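# A minimal usage sketch (file name is illustrative; `SimpleImageProcessor` is the
# placeholder class defined above):
#   from PIL import Image
#   processor = SimpleImageProcessor()
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) after the 224x224 center crop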
| 88
|
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
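# The namedtuple fields can also be unpacked directly, e.g.:
#   cases, deaths, recovered = covid_stats()
# Note this scraper depends on worldometers.info keeping the same markup (the
# "maincounter-number" divs), so the XPath may need updating if the page changes.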
| 327
| 0
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 368
|
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )


class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
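# A minimal export sketch (assumes the public "facebook/bart-base" checkpoint; names
# and task are illustrative):
#   from transformers import AutoTokenizer, BartConfig
#   tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
#   onnx_config = BartOnnxConfig(BartConfig.from_pretrained("facebook/bart-base"), task="seq2seq-lm")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   list(dummy.keys())  # input_ids, attention_mask, decoder_input_ids, ...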
| 44
| 0
|