# Dataset columns: code | code_codestyle | style_context | style_context_codestyle | label
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
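# Usage sketch (added illustration, not part of the original file): because the
# module object is swapped for a _LazyModule, heavyweight submodules are only
# imported on first attribute access, so the first line below stays cheap.
#
#     from transformers import GLPNConfig               # config only
#     from transformers import GLPNForDepthEstimation   # triggers the torch-backed import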
# ---------------------------------------------------------------------------
def merge_sort(collection: list) -> list:
    """Sort by repeatedly pulling the minimum and maximum out of the remainder.

    Smallest items are appended to `start`, largest to `end`; reversing `end`
    and concatenating yields the sorted list.
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
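# Added quick check (note: despite the name, this is a min/max extraction sort,
# O(n^2) because of the repeated min()/max()/remove() scans):
#
#     >>> merge_sort([5, 1, 4, 2, 3])
#     [1, 2, 3, 4, 5]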
# ---------------------------------------------------------------------------
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
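# Example invocation (added sketch; the script file name and all paths are
# placeholders):
#
#     python convert_t5_checkpoint.py \
#         --tf_checkpoint_path /path/to/t5/ckpt \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/output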
# ---------------------------------------------------------------------------
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
# ---------------------------------------------------------------------------
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """Liouville's lambda function: -1 if `number` has an odd count of prime
    factors (with multiplicity), +1 otherwise."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
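# Added example values (assuming maths.prime_factors returns factors with
# multiplicity, e.g. prime_factors(12) == [2, 2, 3]):
#
#     liouville_lambda(1)   # ->  1  (zero prime factors: even count)
#     liouville_lambda(12)  # -> -1  (three factors: 2, 2, 3)
#     liouville_lambda(9)   # ->  1  (two factors: 3, 3)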
# ---------------------------------------------------------------------------
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCAmelCase__ =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
UpperCAmelCase__ =" \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCamelCase__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
__lowercase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) )
__lowercase = self.diffusers_dir
shutil.copy(
os.path.join(A_ , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
__lowercase = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : List[str] , A_ : int , A_ : Optional[Any] , A_ : str=None ):
'''simple docstring'''
__lowercase = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
__lowercase = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
__lowercase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
__lowercase = black.format_str(A_ , mode=A_ )
__lowercase = os.path.join(self.diffusers_dir , """new_code.py""" )
with open(A_ , """w""" , newline="""\n""" ) as f:
f.write(A_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=A_ )
with open(A_ , """r""" ) as f:
self.assertTrue(f.read() , A_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
__lowercase = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , A_ , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , A_ ) , )
# Copy consistency with a really long name
__lowercase = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("""Bert""" , A_ , A_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , A_ , overwrite_result=re.sub("""DDPM""" , """Test""" , A_ ) , )
# ---------------------------------------------------------------------------
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Segment tree over `arr`, combining elements with `fnc` (e.g. min, max, +)."""
        any_type: Any | T = None

        self.N: int = len(arr)
        # the tree is stored flat: internal nodes in st[1:N], leaves in st[N:2*N]
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every [i, j] segment against a brute-force reduce()."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
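# Added usage sketch: leaves live at st[N:2*N], parents are filled bottom-up,
# so point updates and range queries are both O(log N).
#
#     >>> st = SegmentTree([4, 1, 3], min)
#     >>> st.query(0, 1)
#     1
#     >>> st.update(1, 9)
#     >>> st.query(0, 1)
#     4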
# ---------------------------------------------------------------------------
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
# ---------------------------------------------------------------------------
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---------------------------------------------------------------------------
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            # pick the target prefix and slice width for the fused qkv projection
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                attn = "message_attn" if "message_attn" in key else "self_attn"
                prefix = f"vision_model.encoder.layers.{layer_num}.{attn}"
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                prefix = f"mit.encoder.layers.{layer_num}.self_attn"
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            # split the fused in_proj weights/biases into separate q/k/v projections
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our X-CLIP structure.
    """
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=True)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowercase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
# ---------------------------------------------------------------------------
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# ---------------------------------------------------------------------------
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
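# Added cross-check: the Catalan numbers also satisfy the closed form
#
#     C(n) = binomial(2n, n) / (n + 1)
#
# e.g. C(4) = 70 / 5 = 14, which matches catalan_numbers(4)[-1].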
# ---------------------------------------------------------------------------
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
# ---------------------------------------------------------------------------
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
snake_case_ : str = precision
snake_case_ : Any = ceil(precision / 1_4 )
snake_case_ : Dict = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
snake_case_ : Optional[Any] = 1
snake_case_ : List[str] = 1_3_5_9_1_4_0_9
snake_case_ : Optional[int] = Decimal(__UpperCamelCase )
for k in range(1 , __UpperCamelCase ):
snake_case_ : Any = factorial(6 * k ) // (factorial(3 * k ) * factorial(__UpperCamelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__lowerCAmelCase : int = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
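# Added reference: the loop implements the Chudnovsky series
#
#     1/pi = 12 * sum_{k>=0} (-1)^k (6k)! (13591409 + 545140134*k)
#                           / ((3k)! (k!)^3 * 640320^(3k + 3/2))
#
# with the constants folded together: 426880*sqrt(10005) = 640320^(3/2) / 12
# and 262537412640768000 = 640320^3. Each term adds roughly 14 correct digits,
# hence num_iterations = ceil(precision / 14).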
# ---------------------------------------------------------------------------
"""simple docstring"""
def snake_case__ ( _lowerCamelCase = 10_00 ) ->int:
"""simple docstring"""
__lowercase : str = 3
__lowercase : List[Any] = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
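# Added sanity check: solution(10) == 23 (3 + 5 + 6 + 9).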
# ---------------------------------------------------------------------------
"""simple docstring"""
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->int:
"""simple docstring"""
return abs(_lowerCamelCase ) if a == 0 else greatest_common_divisor(b % a, _lowerCamelCase )
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->int:
"""simple docstring"""
while y: # --> when y=0 then loop will terminate and return x as final GCD.
__lowercase ,__lowercase : Any = y, x % y
return abs(_lowerCamelCase )
def snake_case__ ( ) ->Optional[int]:
"""simple docstring"""
try:
__lowercase : Optional[int] = input("Enter two integers separated by comma (,): " ).split("," )
__lowercase : Optional[Any] = int(nums[0] )
__lowercase : str = int(nums[1] )
print(
F'greatest_common_divisor({num_a}, {num_a}) = '
F'{greatest_common_divisor(_lowerCamelCase, _lowerCamelCase )}' )
print(F'By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(_lowerCamelCase, _lowerCamelCase )}' )
except (IndexError, UnboundLocalError, ValueError):
print("Wrong input" )
if __name__ == "__main__":
main()
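# Added quick check for both implementations:
#
#     greatest_common_divisor(24, 40)  # -> 8
#     gcd_by_iterative(24, 40)         # -> 8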
# ---------------------------------------------------------------------------
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---------------------------------------------------------------------------
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
    """Audio Feature to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
    def __call__(self):
        return self.pa_type

    def encode_example(self, value) -> dict:
        """Encode example into a format for Arrow."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value, token_per_repo_id=None) -> dict:
        """Decode example audio file into audio data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self):
        """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage):
        """Cast an Arrow array to the Audio arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage):
        """Embed audio files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
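# Added usage sketch (standard 🤗 Datasets API; the file path is a placeholder):
#
#     from datasets import Dataset, Audio
#     ds = Dataset.from_dict({"audio": ["path/to/file.wav"]})
#     ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#     ds[0]["audio"]  # -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}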
# ---------------------------------------------------------------------------
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
# ---------------------------------------------------------------------------
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int:
"""simple docstring"""
return number | (1 << position)
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int:
"""simple docstring"""
return number & ~(1 << position)
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int:
"""simple docstring"""
return number ^ (1 << position)
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> bool:
"""simple docstring"""
return ((number >> position) & 1) == 1
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int:
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
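# Added worked example with number = 0b1010 (decimal 10):
#
#     set_bit(0b1010, 0)     # -> 11 (0b1011)
#     clear_bit(0b1010, 1)   # -> 8  (0b1000)
#     flip_bit(0b1010, 3)    # -> 2  (0b0010)
#     is_bit_set(0b1010, 1)  # -> True
#     get_bit(0b1010, 2)     # -> 0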
# ---------------------------------------------------------------------------
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__magic_name__ : Optional[int] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = ['''pixel_values''']
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BILINEAR , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
_snake_case = size if size is not None else {"shortest_edge": 256}
_snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
_snake_case = crop_size if crop_size is not None else {"height": 224, "width": 224}
_snake_case = get_size_dict(lowerCamelCase )
_snake_case = do_resize
_snake_case = size
_snake_case = resample
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = do_rescale
_snake_case = rescale_factor
_snake_case = do_normalize
_snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
_snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_snake_case = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
_snake_case = get_size_dict(lowerCamelCase )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
_snake_case = do_resize if do_resize is not None else self.do_resize
_snake_case = size if size is not None else self.size
_snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
_snake_case = resample if resample is not None else self.resample
_snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case = crop_size if crop_size is not None else self.crop_size
_snake_case = get_size_dict(lowerCamelCase )
_snake_case = do_rescale if do_rescale is not None else self.do_rescale
_snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case = do_normalize if do_normalize is not None else self.do_normalize
_snake_case = image_mean if image_mean is not None else self.image_mean
_snake_case = image_std if image_std is not None else self.image_std
_snake_case = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_snake_case = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
_snake_case = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_center_crop:
_snake_case = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images]
if do_rescale:
_snake_case = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
_snake_case = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
_snake_case = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
_snake_case = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
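# --- Hedged sketch: the resize -> center-crop -> rescale -> normalize pipeline above, in plain numpy. ---
# The class fragment above relies on helpers (resize, center_crop, rescale, normalize)
# imported elsewhere in its source file. This standalone version is illustrative only;
# all function and variable names here are our own, not the library's.
import numpy as np

def _center_crop(img: np.ndarray, height: int, width: int) -> np.ndarray:
    # img is (H, W, C); take a centered window of (height, width).
    top = (img.shape[0] - height) // 2
    left = (img.shape[1] - width) // 2
    return img[top : top + height, left : left + width]

def _preprocess(img: np.ndarray) -> np.ndarray:
    # Assume img was already resized so its shorter edge is 256.
    img = _center_crop(img, 224, 224)        # do_center_crop, crop_size=224x224
    img = img.astype(np.float32) / 255.0     # do_rescale, rescale_factor=1/255
    mean = np.array([0.5, 0.5, 0.5])         # IMAGENET_STANDARD_MEAN
    std = np.array([0.5, 0.5, 0.5])          # IMAGENET_STANDARD_STD
    img = (img - mean) / std                 # do_normalize
    return np.transpose(img, (2, 0, 1))      # ChannelDimension.FIRST

_demo = _preprocess(np.zeros((256, 256, 3), dtype=np.uint8))
assert _demo.shape == (3, 224, 224)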
| 672
| 0
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self : List[str] , UpperCamelCase : Any , UpperCamelCase : Optional[Any]=1_3 , UpperCamelCase : int=7 , UpperCamelCase : str=True , UpperCamelCase : Tuple=True , UpperCamelCase : int=True , UpperCamelCase : Tuple=True , UpperCamelCase : Union[str, Any]=9_9 , UpperCamelCase : Union[str, Any]=3_2 , UpperCamelCase : Dict=5 , UpperCamelCase : List[Any]=4 , UpperCamelCase : List[str]=3_7 , UpperCamelCase : List[str]="gelu" , UpperCamelCase : Optional[int]=0.1 , UpperCamelCase : Optional[int]=0.1 , UpperCamelCase : Optional[int]=5_1_2 , UpperCamelCase : Union[str, Any]=1_6 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Optional[int]=0.0_2 , UpperCamelCase : str=4 , )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = parent
__SCREAMING_SNAKE_CASE : Optional[int] = batch_size
__SCREAMING_SNAKE_CASE : Any = seq_length
__SCREAMING_SNAKE_CASE : str = is_training
__SCREAMING_SNAKE_CASE : List[str] = use_attention_mask
__SCREAMING_SNAKE_CASE : Dict = use_token_type_ids
__SCREAMING_SNAKE_CASE : int = use_labels
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
__SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
__SCREAMING_SNAKE_CASE : str = num_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = intermediate_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
__SCREAMING_SNAKE_CASE : int = type_vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : int = initializer_range
__SCREAMING_SNAKE_CASE : Optional[Any] = num_choices
def __snake_case ( self : Optional[Any] )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_attention_mask:
__SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __snake_case ( self : Dict )->Any:
__SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE : Dict = config_and_inputs
__SCREAMING_SNAKE_CASE : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _SCREAMING_SNAKE_CASE (__UpperCamelCase , unittest.TestCase ):
lowerCAmelCase = True
lowerCAmelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __snake_case ( self : List[str] )->Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = FlaxRoFormerModelTester(self )
@slow
def __snake_case ( self : Optional[Any] )->int:
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE : List[str] = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=UpperCamelCase )
__SCREAMING_SNAKE_CASE : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase )
@require_flax
class _SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def __snake_case ( self : int )->Dict:
__SCREAMING_SNAKE_CASE : Tuple = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE : Tuple = model(UpperCamelCase )[0]
__SCREAMING_SNAKE_CASE : Optional[Any] = 5_0_0_0_0
__SCREAMING_SNAKE_CASE : List[str] = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = jnp.array(
[[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase , atol=1E-4 ) )
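# --- Hedged sketch: the rotary position embedding at the heart of RoFormer. ---
# Not the library's implementation; a minimal numpy illustration of rotating
# interleaved query/key pairs by position-dependent angles (Su et al., RoFormer).
import numpy as np

def rotary_embed(x: np.ndarray) -> np.ndarray:
    # x: (seq_len, dim) with dim even; each pair (x[2i], x[2i+1]) is rotated
    # by the angle pos / 10000**(2i/dim).
    seq_len, dim = x.shape
    pos = np.arange(seq_len)[:, None]                         # (seq_len, 1)
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))  # (dim/2,)
    angles = pos * inv_freq[None, :]                          # (seq_len, dim/2)
    cos, sin = np.cos(angles), np.sin(angles)
    x1, x2 = x[:, 0::2], x[:, 1::2]
    return np.stack([x1 * cos - x2 * sin, x1 * sin + x2 * cos], axis=-1).reshape(seq_len, dim)

_q = rotary_embed(np.random.randn(6, 32))
assert _q.shape == (6, 32)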
| 708
|
from collections.abc import Generator
from math import sin
def _lowerCAmelCase ( __lowerCamelCase : bytes ):
"""simple docstring"""
if len(__lowerCamelCase ) != 32:
raise ValueError("Input must be of length 32" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _lowerCAmelCase ( __lowerCamelCase : int ):
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
__SCREAMING_SNAKE_CASE : Any = format(__lowerCamelCase , "08x" )[-8:]
__SCREAMING_SNAKE_CASE : Optional[Any] = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def _lowerCAmelCase ( __lowerCamelCase : bytes ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = b""
for char in message:
bit_string += format(__lowerCamelCase , "08b" ).encode("utf-8" )
__SCREAMING_SNAKE_CASE : List[str] = format(len(__lowerCamelCase ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__lowerCamelCase ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _lowerCAmelCase ( __lowerCamelCase : bytes ):
"""simple docstring"""
if len(__lowerCamelCase ) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__lowerCamelCase ) , 512 ):
__SCREAMING_SNAKE_CASE : int = bit_string[pos : pos + 512]
__SCREAMING_SNAKE_CASE : Tuple = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _lowerCAmelCase ( __lowerCamelCase : int ):
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = format(__lowerCamelCase , "032b" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__lowerCamelCase , 2 )
def _lowerCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
return (a + b) % 2**32
def _lowerCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _lowerCAmelCase ( __lowerCamelCase : bytes ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = preprocess(__lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
    __SCREAMING_SNAKE_CASE : Tuple = 0x67452301
    __SCREAMING_SNAKE_CASE : Optional[Any] = 0xefcdab89
    __SCREAMING_SNAKE_CASE : Optional[int] = 0x98badcfe
    __SCREAMING_SNAKE_CASE : Optional[Any] = 0x10325476
    __SCREAMING_SNAKE_CASE : List[Any] = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Any = aa
__SCREAMING_SNAKE_CASE : Union[str, Any] = ba
__SCREAMING_SNAKE_CASE : str = ca
__SCREAMING_SNAKE_CASE : Dict = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__SCREAMING_SNAKE_CASE : Union[str, Any] = d ^ (b & (c ^ d))
__SCREAMING_SNAKE_CASE : Optional[Any] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__SCREAMING_SNAKE_CASE : int = c ^ (d & (b ^ c))
__SCREAMING_SNAKE_CASE : int = (5 * i + 1) % 16
elif i <= 47:
__SCREAMING_SNAKE_CASE : List[str] = b ^ c ^ d
__SCREAMING_SNAKE_CASE : Union[str, Any] = (3 * i + 5) % 16
else:
__SCREAMING_SNAKE_CASE : Any = c ^ (b | not_aa(__lowerCamelCase ))
__SCREAMING_SNAKE_CASE : str = (7 * i) % 16
__SCREAMING_SNAKE_CASE : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32
__SCREAMING_SNAKE_CASE : Dict = d
__SCREAMING_SNAKE_CASE : str = c
__SCREAMING_SNAKE_CASE : Tuple = b
__SCREAMING_SNAKE_CASE : Optional[int] = sum_aa(__lowerCamelCase , left_rotate_aa(__lowerCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
__SCREAMING_SNAKE_CASE : Dict = sum_aa(__lowerCamelCase , __lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = sum_aa(__lowerCamelCase , __lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = sum_aa(__lowerCamelCase , __lowerCamelCase )
__SCREAMING_SNAKE_CASE : str = sum_aa(__lowerCamelCase , __lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[str] = reformat_hex(__lowerCamelCase ) + reformat_hex(__lowerCamelCase ) + reformat_hex(__lowerCamelCase ) + reformat_hex(__lowerCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
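# --- Hedged sketch: sanity-checking an MD5 implementation against hashlib. ---
# The helper functions above all share one obfuscated name, so they cannot be
# called directly here; this standalone check shows the digest any correct
# implementation of the algorithm above should reproduce for a known input.
import hashlib

_msg = b"The quick brown fox jumps over the lazy dog"
assert hashlib.md5(_msg).hexdigest() == "9e107d9d372bb6826bd81d3542a419d6"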
| 447
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.dummy_uncond_unet
__lowerCamelCase = KarrasVeScheduler()
__lowerCamelCase = KarrasVePipeline(unet=_snake_case , scheduler=_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(num_inference_steps=2 , generator=_snake_case , output_type='''numpy''' ).images
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(num_inference_steps=2 , generator=_snake_case , output_type='''numpy''' , return_dict=_snake_case )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''google/ncsnpp-celebahq-256'''
__lowerCamelCase = UNetaDModel.from_pretrained(_snake_case )
__lowerCamelCase = KarrasVeScheduler()
__lowerCamelCase = KarrasVePipeline(unet=_snake_case , scheduler=_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(num_inference_steps=20 , generator=_snake_case , output_type='''numpy''' ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
__lowerCamelCase = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
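# --- Hedged sketch: the Karras et al. (2022) noise schedule used by KarrasVeScheduler. ---
# Illustrative only (sigma range and rho=7 are assumed defaults from the paper),
# not the diffusers implementation.
import numpy as np

def karras_sigmas(n: int, sigma_min: float = 0.02, sigma_max: float = 100.0, rho: float = 7.0) -> np.ndarray:
    # Interpolate linearly in sigma**(1/rho) space, then raise back to the rho power.
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

_s = karras_sigmas(20)
assert _s[0] > _s[-1]  # sigmas decrease from sigma_max toward sigma_min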
| 316
|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
lowercase__ = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def lowercase__ ( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Any, lowerCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = AudioClassificationPipeline(model=lowerCamelCase, feature_extractor=lowerCamelCase )
# test with a raw waveform
lowercase__ = np.zeros((34_000,) )
lowercase__ = np.zeros((14_000,) )
return audio_classifier, [audioa, audio]
def lowercase__ ( self : str, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = examples
lowercase__ = audio_classifier(lowerCamelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
lowerCamelCase, [
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
], )
lowercase__ = audio_classifier(lowerCamelCase, top_k=1 )
self.assertEqual(
lowerCamelCase, [
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
], )
self.run_torchaudio(lowerCamelCase )
@require_torchaudio
def lowercase__ ( self : Optional[int], lowerCamelCase : List[Any] ):
'''simple docstring'''
import datasets
# test with a local file
lowercase__ = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' )
lowercase__ = dataset[0]['''audio''']['''array''']
lowercase__ = audio_classifier(lowerCamelCase )
self.assertEqual(
lowerCamelCase, [
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
], )
@require_torch
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = '''anton-l/wav2vec2-random-tiny-classifier'''
lowercase__ = pipeline('''audio-classification''', model=lowerCamelCase )
lowercase__ = np.ones((8_000,) )
lowercase__ = audio_classifier(lowerCamelCase, top_k=4 )
lowercase__ = [
{'''score''': 0.0842, '''label''': '''no'''},
{'''score''': 0.0838, '''label''': '''up'''},
{'''score''': 0.0837, '''label''': '''go'''},
{'''score''': 0.0834, '''label''': '''right'''},
]
lowercase__ = [
{'''score''': 0.0845, '''label''': '''stop'''},
{'''score''': 0.0844, '''label''': '''on'''},
{'''score''': 0.0841, '''label''': '''right'''},
{'''score''': 0.0834, '''label''': '''left'''},
]
self.assertIn(nested_simplify(lowerCamelCase, decimals=4 ), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
lowercase__ = {'''array''': np.ones((8_000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
lowercase__ = audio_classifier(lowerCamelCase, top_k=4 )
self.assertIn(nested_simplify(lowerCamelCase, decimals=4 ), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def lowercase__ ( self : List[str] ):
'''simple docstring'''
import datasets
lowercase__ = '''superb/wav2vec2-base-superb-ks'''
lowercase__ = pipeline('''audio-classification''', model=lowerCamelCase )
lowercase__ = datasets.load_dataset('''anton-l/superb_dummy''', '''ks''', split='''test''' )
lowercase__ = np.array(dataset[3]['''speech'''], dtype=np.floataa )
lowercase__ = audio_classifier(lowerCamelCase, top_k=4 )
self.assertEqual(
nested_simplify(lowerCamelCase, decimals=3 ), [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
], )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
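# --- Hedged sketch: what the audio-classification pipeline's top_k post-processing does. ---
# Illustrative reconstruction: softmax the model logits, then keep the k best
# (score, label) pairs. Names are ours, not the pipeline's internals.
import numpy as np

def top_k_scores(logits: np.ndarray, labels: list, k: int = 4) -> list:
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    order = np.argsort(probs)[::-1][:k]
    return [{"score": float(probs[i]), "label": labels[i]} for i in order]

_out = top_k_scores(np.array([2.0, 0.5, 1.0]), ["go", "stop", "up"], k=2)
assert _out[0]["label"] == "go"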
| 183
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase__ : Tuple = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
UpperCamelCase__ : Union[str, Any] = {
"""junnyu/roformer_chinese_small""": 15_36,
"""junnyu/roformer_chinese_base""": 15_36,
"""junnyu/roformer_chinese_char_small""": 5_12,
"""junnyu/roformer_chinese_char_base""": 5_12,
"""junnyu/roformer_small_discriminator""": 1_28,
"""junnyu/roformer_small_generator""": 1_28,
}
UpperCamelCase__ : Union[str, Any] = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class _UpperCamelCase ( A_ ):
'''simple docstring'''
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : Tuple = RoFormerTokenizer
def __init__( self : str , __lowercase : Tuple=None , __lowercase : List[str]=None , __lowercase : int=True , __lowercase : List[Any]="[UNK]" , __lowercase : Any="[SEP]" , __lowercase : List[str]="[PAD]" , __lowercase : int="[CLS]" , __lowercase : Optional[int]="[MASK]" , __lowercase : List[str]=True , __lowercase : Optional[Any]=None , **__lowercase : Optional[int] , ):
'''simple docstring'''
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" , __lowercase ) != do_lower_case
or pre_tok_state.get("""strip_accents""" , __lowercase ) != strip_accents
):
UpperCAmelCase_ = getattr(__lowercase , pre_tok_state.pop("""type""" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = pre_tok_class(**__lowercase )
UpperCAmelCase_ = do_lower_case
def __getstate__( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = BertPreTokenizer()
return state
def __setstate__( self : int , __lowercase : Dict ):
'''simple docstring'''
UpperCAmelCase_ = d
UpperCAmelCase_ = self.__dict__["""_tokenizer"""].get_vocab()
UpperCAmelCase_ = PreTokenizer.custom(JiebaPreTokenizer(__lowercase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowercase : Union[str, Any] , __lowercase : Any=None ):
'''simple docstring'''
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE ( self : int , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowercase : str , __lowercase : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase_ = self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
def SCREAMING_SNAKE_CASE ( self : str , __lowercase : List[Any] , __lowercase : Optional[Any]=None , __lowercase : Dict=None , __lowercase : Tuple=False , **__lowercase : Any , ):
'''simple docstring'''
UpperCAmelCase_ = BertPreTokenizer()
return super().save_pretrained(__lowercase , __lowercase , __lowercase , __lowercase , **__lowercase )
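# --- Hedged sketch: the [CLS]/[SEP] composition implemented by the tokenizer above. ---
# Standalone illustration of build_inputs_with_special_tokens for one and two
# sequences (ids are made up; 101/102 stand in for [CLS]/[SEP]).
_CLS, _SEP = 101, 102

def _with_special_tokens(ids_a, ids_b=None):
    out = [_CLS] + ids_a + [_SEP]
    if ids_b:
        out += ids_b + [_SEP]
    return out

assert _with_special_tokens([7, 8]) == [101, 7, 8, 102]
assert _with_special_tokens([7, 8], [9]) == [101, 7, 8, 102, 9, 102]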
| 713
|
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def A_( A , A , **A ):
UpperCAmelCase_ = AutoConfig.from_pretrained(A , **A )
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_config(A )
model.save_pretrained(A )
AutoTokenizer.from_pretrained(A ).save_pretrained(A )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
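# --- Usage note (assumed file name and arguments; not from the source): ---
# fire.Fire exposes the function's signature as a CLI, so an invocation like
#
#   python save_randomly_initialized.py t5-small ./t5-small-random
#
# would be equivalent to calling the function directly with those two
# positional arguments, with any extra flags forwarded as **kwargs.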
| 486
| 0
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__SCREAMING_SNAKE_CASE : int = 25_00_04
__SCREAMING_SNAKE_CASE : Optional[int] = 25_00_20
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( snake_case, unittest.TestCase ):
_lowerCAmelCase = MBartTokenizer
_lowerCAmelCase = MBartTokenizerFast
_lowerCAmelCase = True
_lowerCAmelCase = True
def _A ( self : Dict ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : Optional[int] = MBartTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self : Optional[Any] ):
lowerCAmelCase : str = MBartTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
lowerCAmelCase : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCAmelCase : str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def _A ( self : Dict ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase : List[str] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
lowerCAmelCase : str = tempfile.mkdtemp()
lowerCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(lowerCamelCase__ )
lowerCAmelCase : Optional[int] = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowerCAmelCase : str = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
lowerCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(lowerCamelCase__ )
lowerCAmelCase : Tuple = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase : List[str] = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
lowerCAmelCase : List[str] = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
lowerCAmelCase : Tuple = tokenizer_r.from_pretrained(lowerCamelCase__ )
lowerCAmelCase : List[Any] = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase : List[Any] = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
lowerCAmelCase : List[str] = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase : List[Any] = tokenizer_r.from_pretrained(lowerCamelCase__ )
lowerCAmelCase : Union[str, Any] = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
_lowerCAmelCase = "facebook/mbart-large-en-ro"
_lowerCAmelCase = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
_lowerCAmelCase = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
_lowerCAmelCase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def _A ( cls : Optional[Any] ):
lowerCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
lowerCAmelCase : List[str] = 1
return cls
def _A ( self : Tuple ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )
def _A ( self : int ):
lowerCAmelCase : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ )
def _A ( self : str ):
self.assertIn(lowerCamelCase__ , self.tokenizer.all_special_ids )
lowerCAmelCase : Optional[int] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
lowerCAmelCase : Tuple = self.tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
lowerCAmelCase : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase__ )
def _A ( self : List[str] ):
lowerCAmelCase : List[str] = ['''this is gunna be a long sentence ''' * 2_0]
assert isinstance(src_text[0] , lowerCamelCase__ )
lowerCAmelCase : Union[str, Any] = 1_0
lowerCAmelCase : Tuple = self.tokenizer(lowerCamelCase__ , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCamelCase__ )
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
def _A ( self : Union[str, Any] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def _A ( self : Optional[Any] ):
lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
lowerCAmelCase : str = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase__ )
lowerCAmelCase : Dict = MBartTokenizer.from_pretrained(lowerCamelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase__ )
@require_torch
def _A ( self : Optional[int] ):
lowerCAmelCase : List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase__ , return_tensors='''pt''' )
lowerCAmelCase : Dict = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _A ( self : Dict ):
lowerCAmelCase : Optional[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
lowerCAmelCase : Union[str, Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
lowerCAmelCase : int = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _A ( self : List[Any] ):
lowerCAmelCase : Tuple = self.tokenizer(self.src_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=3 , return_tensors='''pt''' )
lowerCAmelCase : Optional[Any] = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=1_0 , return_tensors='''pt''' )
lowerCAmelCase : Dict = targets['''input_ids''']
lowerCAmelCase : str = shift_tokens_right(lowerCamelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def _A ( self : Dict ):
lowerCAmelCase : Optional[Any] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 2_5_0_0_0_1,
} , )
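# --- Hedged sketch: MBart's shift_tokens_right, as exercised by the tests above. ---
# Illustrative numpy reconstruction: the last non-pad token (the language code
# MBart places after EOS) is rotated to the front to form decoder inputs.
import numpy as np

def _shift_tokens_right(input_ids: np.ndarray, pad_token_id: int) -> np.ndarray:
    shifted = input_ids.copy()
    last_nonpad = (input_ids != pad_token_id).sum(axis=1) - 1
    start_tokens = input_ids[np.arange(len(input_ids)), last_nonpad]
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = start_tokens
    return shifted

_ids = np.array([[62, 3034, 2, 250020, 1, 1]])  # ..., EOS=2, lang code, then pads
assert _shift_tokens_right(_ids, pad_token_id=1)[0, 0] == 250020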
| 348
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__SCREAMING_SNAKE_CASE : Optional[int] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def UpperCAmelCase__ ( __magic_name__ : Tuple ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def UpperCAmelCase__ ( __magic_name__ : int , __magic_name__ : List[Any] ):
'''simple docstring'''
if args.student_type == "roberta":
lowerCAmelCase : Tuple = False
elif args.student_type == "gpt2":
lowerCAmelCase : Optional[int] = False
def UpperCAmelCase__ ( __magic_name__ : Tuple , __magic_name__ : int ):
'''simple docstring'''
if args.student_type == "roberta":
lowerCAmelCase : Optional[Any] = False
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase : Tuple = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=__magic_name__ , required=__magic_name__ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
        '''--data_file''' , type=__magic_name__ , required=__magic_name__ , help='''The binarized file (tokenized + tokens_to_ids), grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=__magic_name__ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=__magic_name__ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=__magic_name__ , required=__magic_name__ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=__magic_name__ , type=__magic_name__ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=__magic_name__ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=__magic_name__ , required=__magic_name__ , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=__magic_name__ , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=__magic_name__ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=__magic_name__ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=__magic_name__ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=__magic_name__ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=__magic_name__ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=__magic_name__ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=__magic_name__ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=__magic_name__ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=__magic_name__ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
        '''--mlm_smoothing''' , default=0.7 , type=__magic_name__ , help='''Smoothing parameter to emphasize rarer tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=__magic_name__ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=__magic_name__ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=__magic_name__ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__magic_name__ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=__magic_name__ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__magic_name__ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5e-4 , type=__magic_name__ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-6 , type=__magic_name__ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=__magic_name__ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=__magic_name__ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__magic_name__ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=__magic_name__ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=__magic_name__ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=__magic_name__ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=__magic_name__ , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=__magic_name__ , default=40_00 , help='''Checkpoint interval.''' )
lowerCAmelCase : List[Any] = parser.parse_args()
sanity_checks(__magic_name__ )
# ARGS #
init_gpu_params(__magic_name__ )
set_seed(__magic_name__ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f'''Serialization dir {args.dump_path} already exists, but you have not specified whether to'''
                    ''' overwrite it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(f'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(__magic_name__ ) , __magic_name__ , indent=4 )
git_log(args.dump_path )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = MODEL_CLASSES[args.student_type]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
lowerCAmelCase : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
lowerCAmelCase : Union[str, Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
lowerCAmelCase : List[Any] = tokenizer.all_special_tokens.index(__magic_name__ )
lowerCAmelCase : Dict = tokenizer.all_special_ids[idx]
logger.info(f'''Special tokens {special_tok_ids}''' )
lowerCAmelCase : Optional[int] = special_tok_ids
lowerCAmelCase : Any = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
lowerCAmelCase : Optional[int] = pickle.load(__magic_name__ )
if args.mlm:
logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
lowerCAmelCase : Tuple = pickle.load(__magic_name__ )
lowerCAmelCase : Tuple = np.maximum(__magic_name__ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
lowerCAmelCase : List[str] = 0.0 # do not predict special tokens
lowerCAmelCase : Any = torch.from_numpy(__magic_name__ )
else:
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : Union[str, Any] = LmSeqsDataset(params=__magic_name__ , data=__magic_name__ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f'''Loading student config from {args.student_config}''' )
lowerCAmelCase : Tuple = student_config_class.from_pretrained(args.student_config )
lowerCAmelCase : str = True
if args.student_pretrained_weights is not None:
logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' )
lowerCAmelCase : str = student_model_class.from_pretrained(args.student_pretrained_weights , config=__magic_name__ )
else:
lowerCAmelCase : str = student_model_class(__magic_name__ )
if args.n_gpu > 0:
student.to(f'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
lowerCAmelCase : str = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__magic_name__ )
if args.n_gpu > 0:
teacher.to(f'''cuda:{args.local_rank}''' )
logger.info(f'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__magic_name__ , __magic_name__ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__magic_name__ , __magic_name__ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
lowerCAmelCase : List[Any] = Distiller(
params=__magic_name__ , dataset=__magic_name__ , token_probs=__magic_name__ , student=__magic_name__ , teacher=__magic_name__ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
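# --- Hedged sketch: the temperature-scaled distillation loss the Distiller above optimizes. ---
# Illustrative torch reconstruction of the alpha_ce term: KL divergence between
# teacher and student distributions softened by `temperature`, scaled by T**2.
import torch
import torch.nn.functional as F

def distillation_ce(student_logits, teacher_logits, temperature: float = 2.0):
    t = temperature
    loss = F.kl_div(
        F.log_softmax(student_logits / t, dim=-1),
        F.softmax(teacher_logits / t, dim=-1),
        reduction="batchmean",
    )
    return loss * t * t

_l = distillation_ce(torch.randn(4, 10), torch.randn(4, 10))
assert _l.item() >= 0.0  # KL divergence is non-negative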
| 348
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class A__ ( _lowerCamelCase , _lowerCamelCase):
A_ : Any = 'resnet'
A_ : Optional[int] = ['basic', 'bottleneck']
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=[2_56, 5_12, 10_24, 20_48] , _SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , _SCREAMING_SNAKE_CASE="bottleneck" , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
super().__init__(**_SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
__lowerCAmelCase : int = num_channels
__lowerCAmelCase : Optional[Any] = embedding_size
__lowerCAmelCase : int = hidden_sizes
__lowerCAmelCase : Optional[int] = depths
__lowerCAmelCase : Optional[int] = layer_type
__lowerCAmelCase : int = hidden_act
__lowerCAmelCase : List[str] = downsample_in_first_stage
__lowerCAmelCase : Union[str, Any] = ['stem'] + [f"stage{idx}" for idx in range(1 , len(_SCREAMING_SNAKE_CASE ) + 1 )]
__lowerCAmelCase , __lowerCAmelCase : str = get_aligned_output_features_output_indices(
out_features=_SCREAMING_SNAKE_CASE , out_indices=_SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class A__ ( _lowerCamelCase):
A_ : int = version.parse('1.11')
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
return 1E-3
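# --- Hedged sketch: how out_features map to out_indices over stage_names. ---
# Illustrative reconstruction of the alignment performed by
# get_aligned_output_features_output_indices: requested feature names are
# matched to their positions in ["stem", "stage1", ...].
_stage_names = ["stem"] + [f"stage{i}" for i in range(1, 5)]

def _align(out_features, stage_names):
    return out_features, [stage_names.index(name) for name in out_features]

assert _align(["stage2", "stage4"], _stage_names) == (["stage2", "stage4"], [2, 4])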
| 549
|
"""simple docstring"""
# Imports
import numpy as np
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
if red is not None:
__lowerCAmelCase : Optional[int] = red
if green is not None:
__lowerCAmelCase : Any = green
if blue is not None:
__lowerCAmelCase : str = blue
if red_edge is not None:
__lowerCAmelCase : Dict = red_edge
if nir is not None:
__lowerCAmelCase : Dict = nir
return True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
def __lowerCamelCase ( self ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def __lowerCamelCase ( self ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def __lowerCamelCase ( self ):
return self.nir * (self.red / (self.green**2))
def __lowerCamelCase ( self ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def __lowerCamelCase ( self ):
return (self.nir - self.red) / (self.nir + self.red)
def __lowerCamelCase ( self ):
return (self.nir - self.blue) / (self.nir + self.blue)
def __lowerCamelCase ( self ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def __lowerCamelCase ( self ):
return (self.nir - self.green) / (self.nir + self.green)
def __lowerCamelCase ( self ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def __lowerCamelCase ( self ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def __lowerCamelCase ( self ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def __lowerCamelCase ( self ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0.08 , _SCREAMING_SNAKE_CASE=1.22 , _SCREAMING_SNAKE_CASE=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def __lowerCamelCase ( self ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def __lowerCamelCase ( self ):
return (self.nir / self.green) - 1
def __lowerCamelCase ( self ):
return (self.nir / self.redEdge) - 1
def __lowerCamelCase ( self ):
return (self.red - self.blue) / self.red
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def __lowerCamelCase ( self ):
return self.nir - self.green
def __lowerCamelCase ( self ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def __lowerCamelCase ( self ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
return (self.nir - b) / (a * self.red)
def __lowerCamelCase ( self ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def __lowerCamelCase ( self ):
return (self.red + self.green + self.blue) / 30.5
def __lowerCamelCase ( self ):
return self.nir / self.red
def __lowerCamelCase ( self ):
return (self.rvi() - 1) / (self.rvi() + 1)
def __lowerCamelCase ( self ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def __lowerCamelCase ( self ):
return self.green / (self.nir + self.red + self.green)
def __lowerCamelCase ( self ):
return self.nir / (self.nir + self.red + self.green)
def __lowerCamelCase ( self ):
return self.red / (self.nir + self.red + self.green)
def __lowerCamelCase ( self ):
return (self.green - self.red) / (self.green + self.red)
def __lowerCamelCase ( self ):
return (self.red - self.green) / (self.red + self.green)
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
__lowerCAmelCase : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def __lowerCamelCase ( self ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def __lowerCamelCase ( self ):
return self.nir / self.red
def __lowerCamelCase ( self ):
return (self.ndvi() + 0.5) ** (1 / 2)
def __lowerCamelCase ( self ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
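# --- Hedged sketch: computing one of the indices above on raw band arrays. ---
# The class name above is obfuscated, so this applies the NDVI formula directly;
# the band values are made up.
import numpy as np

_red = np.array([0.20, 0.30, 0.25])
_nir = np.array([0.60, 0.55, 0.70])
_ndvi = (_nir - _red) / (_nir + _red)
assert np.all(_ndvi > 0) and np.all(_ndvi <= 1)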
| 549
| 1
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'transformers',
    os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
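
# Quick sanity check of the checkpoint regex above (a sketch; run separately):
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]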
| 57
|
'''simple docstring'''
def multiplicative_persistence(__A):
    """simple docstring"""
    if not isinstance(__A , int):
        raise ValueError('''multiplicative_persistence() only accepts integral values''')
    if __A < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''')
    steps = 0
    num_string = str(__A)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0 , len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(__A):
    """simple docstring"""
    if not isinstance(__A , int):
        raise ValueError('''additive_persistence() only accepts integral values''')
    if __A < 0:
        raise ValueError('''additive_persistence() does not accept negative values''')
    steps = 0
    num_string = str(__A)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0 , len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
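
# Worked examples: multiplicative_persistence(39) == 3, since
# 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4; additive_persistence(199) == 3,
# since 199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1.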
| 11
| 0
|
"""simple docstring"""
def lowercase__ ( snake_case_ :int = 1_000 ):
    fa , fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa , fb = fb, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == snake_case_:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
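
# Worked example: solution(3) returns 12, because F(12) = 144 is the first
# Fibonacci number with three digits (the default of 1_000 digits matches
# Project Euler problem 25).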
| 721
|
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
_lowercase : int = logging.get_logger(__name__)
_lowercase : List[str] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_lowercase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
a__ : str = field(
default=_lowerCAmelCase , metadata={"help": "Model type selected in the list: " + ", ".join(_lowerCAmelCase )} )
a__ : str = field(
default=_lowerCAmelCase , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
a__ : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
a__ : int = field(
default=128 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
a__ : int = field(
default=64 , metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
a__ : int = field(
default=30 , metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
a__ : bool = field(
default=_lowerCAmelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
a__ : bool = field(
default=_lowerCAmelCase , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
a__ : float = field(
default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
a__ : int = field(
default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
a__ : int = field(
default=0 , metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
a__ : int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : List[str] = "train"
a__ : List[Any] = "dev"
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : SquadDataTrainingArguments
a__ : List[SquadFeatures]
a__ : Split
a__ : bool
    def __init__( self : Optional[Any] , args : SquadDataTrainingArguments , tokenizer : PreTrainedTokenizer , limit_length : Optional[int] = None , mode : Union[str, Split] = Split.train , is_language_sensitive : Optional[bool] = False , cache_dir : Optional[str] = None , dataset_format : Optional[str] = "pt" , ):
__UpperCAmelCase = args
__UpperCAmelCase = is_language_sensitive
        __UpperCAmelCase = SquadVaProcessor()
        if isinstance(mode , str ):
try:
__UpperCAmelCase = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
__UpperCAmelCase = mode
# Load data features from cache or dataset file
__UpperCAmelCase = '''v2''' if args.version_2_with_negative else '''v1'''
__UpperCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__UpperCAmelCase = cached_features_file + '''.lock'''
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                __UpperCAmelCase = time.time()
                __UpperCAmelCase = torch.load(cached_features_file )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__UpperCAmelCase = self.old_features['''features''']
__UpperCAmelCase = self.old_features.get('''dataset''' , _lowercase )
__UpperCAmelCase = self.old_features.get('''examples''' , _lowercase )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
''' future run''' )
else:
if mode == Split.dev:
__UpperCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
__UpperCAmelCase = self.processor.get_train_examples(args.data_dir )
__UpperCAmelCase , __UpperCAmelCase = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
__UpperCAmelCase = time.time()
                torch.save(
                    {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , cached_features_file , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Any , _lowercase : Optional[int] ):
# Convert to Tensors and build dataset
__UpperCAmelCase = self.features[i]
__UpperCAmelCase = torch.tensor(feature.input_ids , dtype=torch.long )
__UpperCAmelCase = torch.tensor(feature.attention_mask , dtype=torch.long )
__UpperCAmelCase = torch.tensor(feature.token_type_ids , dtype=torch.long )
__UpperCAmelCase = torch.tensor(feature.cls_index , dtype=torch.long )
__UpperCAmelCase = torch.tensor(feature.p_mask , dtype=torch.float )
__UpperCAmelCase = torch.tensor(feature.is_impossible , dtype=torch.float )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
            inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
if self.mode == Split.train:
__UpperCAmelCase = torch.tensor(feature.start_position , dtype=torch.long )
__UpperCAmelCase = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
| 397
| 0
|
UpperCAmelCase_ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm ( _snake_case :str ) -> int:
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in _snake_case:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
UpperCAmelCase_ = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(f'{equation} = {dijkstras_two_stack_algorithm(equation)}')
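
# Trace of the two-stack evaluation for "(5 + ((4 * 2) * (2 + 3)))":
#   "(4 * 2)" closes first -> push 8
#   "(2 + 3)" closes next  -> push 5
#   "(8 * 5)" closes       -> push 40
#   "(5 + 40)" closes      -> push 45, the returned result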
| 2
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively ( hf_pointer :Union[str, Any] , key :str , value :Any , full_name :int , weight_type :List[Any] ) -> Optional[int]:
for attribute in key.split('''.''' ):
_A = getattr(_snake_case , _snake_case )
if weight_type is not None:
_A = getattr(_snake_case , _snake_case ).shape
else:
_A = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_A = value
elif weight_type == "weight_g":
_A = value
elif weight_type == "weight_v":
_A = value
elif weight_type == "bias":
_A = value
else:
_A = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights ( fairseq_model :Union[str, Any] , hf_model :Any , is_finetuned :int ) -> Any:
_A = []
_A = fairseq_model.state_dict()
_A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_A = False
if "conv_layers" in name:
load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
_A = True
else:
for key, mapped_key in MAPPING.items():
_A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_A = True
if "*" in mapped_key:
                        _A = name.split(key )[0].split('''.''' )[-2]
                        _A = mapped_key.replace('''*''' , layer_index )
if "weight_g" in name:
_A = '''weight_g'''
elif "weight_v" in name:
_A = '''weight_v'''
elif "weight" in name:
_A = '''weight'''
elif "bias" in name:
_A = '''bias'''
else:
_A = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
            unused_weights.append(name )
logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer ( full_name :Tuple , value :List[str] , feature_extractor :List[str] , unused_weights :Optional[int] , use_group_norm :List[Any] ) -> Any:
_A = full_name.split('''conv_layers.''' )[-1]
_A = name.split('''.''' )
_A = int(items[0] )
_A = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
        unused_weights.append(full_name )
def convert_config ( model :str , is_finetuned :Dict ) -> Tuple:
_A = SEWConfig()
if is_finetuned:
_A = model.wav_encoder.wav_model.cfg
else:
_A = model.cfg
_A = fs_config.conv_bias
_A = eval(fs_config.conv_feature_layers )
_A = [x[0] for x in conv_layers]
_A = [x[1] for x in conv_layers]
_A = [x[2] for x in conv_layers]
_A = '''gelu'''
_A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
_A = 0.0
_A = fs_config.activation_fn.name
_A = fs_config.encoder_embed_dim
_A = 0.02
_A = fs_config.encoder_ffn_embed_dim
_A = 1E-5
_A = fs_config.encoder_layerdrop
_A = fs_config.encoder_attention_heads
_A = fs_config.conv_pos_groups
_A = fs_config.conv_pos
_A = len(_snake_case )
_A = fs_config.encoder_layers
_A = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_A = model.cfg
_A = fs_config.final_dropout
_A = fs_config.layerdrop
_A = fs_config.activation_dropout
_A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_A = fs_config.attention_dropout
_A = fs_config.dropout_input
_A = fs_config.dropout
_A = fs_config.mask_channel_length
_A = fs_config.mask_channel_prob
_A = fs_config.mask_length
_A = fs_config.mask_prob
_A = '''Wav2Vec2FeatureExtractor'''
_A = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def convert_sew_checkpoint ( checkpoint_path :Optional[int] , pytorch_dump_folder_path :Union[str, Any] , config_path :Optional[Any]=None , dict_path :Optional[int]=None , is_finetuned :Dict=True ) -> List[Any]:
if is_finetuned:
_A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
        _A = SEWConfig.from_pretrained(config_path )
    else:
        _A = convert_config(model[0] , is_finetuned )
    _A = model[0].eval()
    _A = True if config.feat_extract_norm == '''layer''' else False
    _A = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
if is_finetuned:
        if dict_path:
            _A = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            _A = target_dict.pad_index
            _A = target_dict.bos_index
            _A = target_dict.pad_index
            _A = target_dict.bos_index
            _A = target_dict.eos_index
            _A = len(target_dict.symbols )
            _A = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            _A = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            _A = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        _A = SEWForCTC(config )
    else:
        _A = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCAmelCase_ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 2
| 1
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__( self :int , parent :Tuple , batch_size :int=2 , is_training :Dict=True , use_auxiliary_loss :Optional[int]=False , num_queries :Optional[Any]=10 , num_channels :Tuple=3 , min_size :List[Any]=32 * 8 , max_size :Union[str, Any]=32 * 8 , num_labels :Optional[Any]=4 , hidden_dim :Optional[Any]=64 , ) -> str:
'''simple docstring'''
a__ = parent
a__ = batch_size
a__ = is_training
a__ = use_auxiliary_loss
a__ = num_queries
a__ = num_channels
a__ = min_size
a__ = max_size
a__ = num_labels
a__ = hidden_dim
a__ = hidden_dim
def _UpperCamelCase ( self :Tuple ) -> int:
'''simple docstring'''
        a__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        a__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        a__ = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        a__ = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
a__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _UpperCamelCase ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
a__ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
a__ = self.num_queries
a__ = self.num_labels
a__ = [1, 1, 1, 1]
a__ = self.num_channels
a__ = 64
a__ = 128
a__ = self.hidden_dim
a__ = self.hidden_dim
a__ = self.hidden_dim
return config
def _UpperCamelCase ( self :str ) -> int:
'''simple docstring'''
a__ , a__ , a__ , a__ , a__ = self.prepare_config_and_inputs()
a__ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
    def _UpperCamelCase ( self :Any , output :Union[str, Any] , config :Tuple ) -> Dict:
'''simple docstring'''
a__ = output.encoder_hidden_states
a__ = output.pixel_decoder_hidden_states
a__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__magic_name__ ) , config.decoder_layers )
    def _UpperCamelCase ( self :Optional[int] , config :Union[str, Any] , pixel_values :str , pixel_mask :Optional[int] , output_hidden_states :List[str]=False ) -> Any:
        '''simple docstring'''
        with torch.no_grad():
            a__ = MaskaFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            a__ = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            a__ = model(pixel_values , output_hidden_states=output_hidden_states )
            self.parent.assertEqual(
                output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
            # let's ensure the other two hidden states exist
            self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(output.encoder_last_hidden_state is not None )
            if output_hidden_states:
                self.check_output_hidden_state(output , config )
    def _UpperCamelCase ( self :Any , config :Optional[Any] , pixel_values :List[str] , pixel_mask :Any , mask_labels :Union[str, Any] , class_labels :int ) -> Any:
'''simple docstring'''
a__ = MaskaFormerForUniversalSegmentation(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
def comm_check_on_output(__magic_name__ :Optional[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
a__ = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
a__ = model(__magic_name__ )
comm_check_on_output(__magic_name__ )
a__ = model(
pixel_values=__magic_name__ , pixel_mask=__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
comm_check_on_output(__magic_name__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
snake_case__ : Optional[Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case__ : Dict = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
snake_case__ : str = False
snake_case__ : int = False
snake_case__ : int = False
snake_case__ : List[Any] = False
def _UpperCamelCase ( self :str ) -> List[str]:
'''simple docstring'''
a__ = MaskaFormerModelTester(self )
a__ = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def _UpperCamelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self :Any ) -> str:
'''simple docstring'''
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )
def _UpperCamelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__magic_name__ )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def _UpperCamelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def _UpperCamelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def _UpperCamelCase ( self :str ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def _UpperCamelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _UpperCamelCase ( self :Dict ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _UpperCamelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
pass
def _UpperCamelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__magic_name__ )
a__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ = [*signature.parameters.keys()]
a__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __magic_name__ )
@slow
def _UpperCamelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
a__ = MaskaFormerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _UpperCamelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
a__ = (self.model_tester.min_size,) * 2
a__ = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=torch_device ),
            '''mask_labels''': torch.randn((2, 10, *size) , device=torch_device ),
            '''class_labels''': torch.zeros(2 , 10 , device=torch_device ).long(),
}
a__ = self.model_tester.get_config()
        a__ = MaskaFormerForUniversalSegmentation(config ).to(torch_device )
        a__ = model(**inputs )
self.assertTrue(outputs.loss is not None )
def _UpperCamelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )
def _UpperCamelCase ( self :int ) -> Tuple:
'''simple docstring'''
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__magic_name__ ).to(__magic_name__ )
a__ = model(**__magic_name__ , output_attentions=__magic_name__ )
self.assertTrue(outputs.attentions is not None )
def _UpperCamelCase ( self :Optional[int] ) -> Any:
'''simple docstring'''
if not self.model_tester.is_training:
return
a__ = self.all_model_classes[1]
a__ , a__ , a__ , a__ , a__ = self.model_tester.prepare_config_and_inputs()
a__ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
a__ = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ ).loss
loss.backward()
def _UpperCamelCase ( self :List[Any] ) -> str:
'''simple docstring'''
a__ = self.all_model_classes[1]
a__ , a__ , a__ , a__ , a__ = self.model_tester.prepare_config_and_inputs()
a__ = True
a__ = True
a__ = model_class(__magic_name__ ).to(__magic_name__ )
model.train()
a__ = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
a__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
a__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
a__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
a__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__magic_name__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCAmelCase : str = 1E-4
def __snake_case ( ) -> Dict:
"""simple docstring"""
a__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCamelCase ( self :List[Any] ) -> int:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _UpperCamelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _UpperCamelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
a__ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__magic_name__ )
a__ = self.default_image_processor
a__ = prepare_img()
a__ = image_processor(__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ )
a__ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 384, 384) )
with torch.no_grad():
a__ = model(**__magic_name__ )
a__ = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
a__ = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
a__ = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def _UpperCamelCase ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
a__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__magic_name__ ).eval()
a__ = self.default_image_processor
a__ = prepare_img()
a__ = image_processor(__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ )
a__ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 384, 384) )
with torch.no_grad():
a__ = model(**__magic_name__ )
# masks_queries_logits
a__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
a__ = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
a__ = torch.tensor(__magic_name__ ).to(__magic_name__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
# class_queries_logits
a__ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
a__ = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def _UpperCamelCase ( self :Dict ) -> str:
'''simple docstring'''
a__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__magic_name__ ).eval()
a__ = self.default_image_processor
a__ = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='''pt''' , )
a__ = inputs['''pixel_values'''].to(__magic_name__ )
a__ = [el.to(__magic_name__ ) for el in inputs['''mask_labels''']]
a__ = [el.to(__magic_name__ ) for el in inputs['''class_labels''']]
with torch.no_grad():
a__ = model(**__magic_name__ )
self.assertTrue(outputs.loss is not None )
| 158
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
snake_case__ : str = ['image_processor', 'tokenizer']
snake_case__ : List[str] = 'FlavaImageProcessor'
snake_case__ : Dict = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self :List[Any] , image_processor :List[Any]=None , tokenizer :int=None , **kwargs :List[Any] ) -> List[str]:
        '''simple docstring'''
        a__ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            a__ = kwargs.pop('''feature_extractor''' )
        a__ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        a__ = self.image_processor
    def __call__( self :Any , images :Optional[ImageInput] = None , text :Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , add_special_tokens :bool = True , padding :Union[bool, str, PaddingStrategy] = False , truncation :Union[bool, str, TruncationStrategy] = False , max_length :Optional[int] = None , stride :int = 0 , pad_to_multiple_of :Optional[int] = None , return_image_mask :Optional[bool] = None , return_codebook_pixels :Optional[bool] = None , return_token_type_ids :Optional[bool] = None , return_attention_mask :Optional[bool] = None , return_overflowing_tokens :bool = False , return_special_tokens_mask :bool = False , return_offsets_mapping :bool = False , return_length :bool = False , verbose :bool = True , return_tensors :Optional[Union[str, TensorType]] = None , **kwargs :Any , ) -> Tuple:
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            a__ = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            a__ = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def _UpperCamelCase ( self :Union[str, Any] , *args :List[str] , **kwargs :Dict ) -> str:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def _UpperCamelCase ( self :Tuple , *args :Tuple , **kwargs :List[str] ) -> List[Any]:
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
@property
def _UpperCamelCase ( self :List[Any] ) -> str:
'''simple docstring'''
a__ = self.tokenizer.model_input_names
a__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _UpperCamelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
@property
def _UpperCamelCase ( self :int ) -> List[str]:
'''simple docstring'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
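
# Note (illustration, not part of the original module): `model_input_names`
# above merges the tokenizer's and image processor's input names while
# preserving order and dropping duplicates:
#   list(dict.fromkeys(["input_ids", "attention_mask", "pixel_values", "attention_mask"]))
#   -> ["input_ids", "attention_mask", "pixel_values"]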
| 158
| 1
|
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
DEPRECATION_WARNING = (
"This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy ( preds : int , labels : Tuple )-> Tuple:
    """simple docstring"""
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(simple_accuracy , "sklearn" )
    return (preds == labels).mean()
def acc_and_fa ( preds : int , labels : Any )-> List[Any]:
    """simple docstring"""
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(acc_and_fa , "sklearn" )
    A__ = simple_accuracy(preds , labels )
    A__ = fa_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": fa,
        "acc_and_f1": (acc + fa) / 2,
    }
def pearson_and_spearman ( preds : Optional[int] , labels : List[str] )-> Tuple:
    """simple docstring"""
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(pearson_and_spearman , "sklearn" )
    A__ = pearsonr(preds , labels )[0]
    A__ = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics ( task_name : Optional[int] , preds : List[Any] , labels : Optional[Any] )-> Dict:
    """simple docstring"""
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(glue_compute_metrics , "sklearn" )
    assert len(preds ) == len(labels ), f"""Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}"""
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_fa(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_fa(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def xnli_compute_metrics ( task_name : List[str] , preds : int , labels : str )-> Dict:
    """simple docstring"""
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(xnli_compute_metrics , "sklearn" )
    if len(preds ) != len(labels ):
        raise ValueError(f"""Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}""" )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
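
# Minimal usage sketch (appended for illustration, not part of the original
# module); with scikit-learn and scipy installed:
#   import numpy as np
#   glue_compute_metrics("mrpc", np.array([1, 0, 1, 1]), np.array([1, 1, 1, 0]))
#   -> {"acc": 0.5, "f1": 0.666..., "acc_and_f1": 0.583...}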
| 491
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(RF'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 491
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
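
# Note (illustration): with the lazy module in place, an import such as
#   from transformers.models.mluke import MLukeTokenizer
# defers loading the sentencepiece-backed tokenizer code until the attribute
# is first accessed.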
| 710
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase_ = get_tests_dir("fixtures")
lowercase_ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase_ = get_tests_dir("fixtures/dummy-config.json")
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : Any ):
__snake_case : Tuple = 0
def snake_case__ ( self : int ):
__snake_case : int = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Dict ):
__snake_case : Optional[Any] = AutoFeatureExtractor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Optional[Any] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__snake_case : Optional[Any] = AutoFeatureExtractor.from_pretrained(_lowerCAmelCase ).to_dict()
config_dict.pop("""feature_extractor_type""" )
__snake_case : Tuple = WavaVecaFeatureExtractor(**_lowerCAmelCase )
# save in new folder
model_config.save_pretrained(_lowerCAmelCase )
config.save_pretrained(_lowerCAmelCase )
__snake_case : Optional[Any] = AutoFeatureExtractor.from_pretrained(_lowerCAmelCase )
# make sure private variable is not incorrectly saved
__snake_case : Any = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Dict ):
__snake_case : Dict = AutoFeatureExtractor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : List[str] ):
with self.assertRaisesRegex(
_lowerCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
__snake_case : Any = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def snake_case__ ( self : Dict ):
with self.assertRaisesRegex(
_lowerCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__snake_case : Dict = AutoFeatureExtractor.from_pretrained(_lowerCAmelCase , revision="""aaaaaa""" )
def snake_case__ ( self : str ):
with self.assertRaisesRegex(
_lowerCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
__snake_case : Union[str, Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def snake_case__ ( self : str ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowerCAmelCase ):
__snake_case : Optional[int] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCAmelCase ):
__snake_case : List[str] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_lowerCAmelCase )
__snake_case : Tuple = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_lowerCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_lowerCAmelCase )
__snake_case : Optional[int] = AutoFeatureExtractor.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def snake_case__ ( self : List[Any] ):
try:
AutoConfig.register("""custom""" , _lowerCAmelCase )
AutoFeatureExtractor.register(_lowerCAmelCase , _lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase ):
AutoFeatureExtractor.register(_lowerCAmelCase , _lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
__snake_case : List[str] = CustomFeatureExtractor.from_pretrained(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_lowerCAmelCase )
__snake_case : Optional[int] = AutoFeatureExtractor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def snake_case__ ( self : Union[str, Any] ):
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Tuple = True
try:
AutoConfig.register("""custom""" , _lowerCAmelCase )
AutoFeatureExtractor.register(_lowerCAmelCase , _lowerCAmelCase )
# If remote code is not set, the default is to use local
__snake_case : Any = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__snake_case : List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_lowerCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__snake_case : List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_lowerCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(_lowerCAmelCase , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 390
| 0
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def __a ( __UpperCAmelCase : TreeNode | None ) -> int:
"""simple docstring"""
if root is None:
return 0
# Validation
    def count_nodes(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(__UpperCAmelCase ) != count_coins(__UpperCAmelCase ):
        raise ValueError("The nodes number should be same as the number of coins" )

    # Main calculation
    def get_distrib(node: TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )

    return get_distrib(__UpperCAmelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
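
# Worked example (appended for illustration): a root holding 0 coins with a
# left child holding 3 and an empty-handed right child needs 3 moves -- two
# coins leave the left child and one of them travels on to the right child:
#   __a(TreeNode(0, TreeNode(3), TreeNode(0)))  # -> 3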
| 488
|
def topological_sort(graph: dict ) -> None:
    """simple docstring"""
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print("Cycle exists" )
    else:
        print(topo )


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
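
# For the adjacency list above, Kahn's algorithm prints [0, 1, 2, 3, 4, 5]:
# vertex 0 is the only zero-indegree node, it releases 1 and 2, they release 3,
# and 3 finally releases 4 and 5.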
| 488
| 1
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
    def __init__( self : List[str] , parent : Tuple , batch_size : Tuple=13 , seq_length : List[Any]=7 , is_training : Optional[int]=True , use_input_mask : Tuple=True , use_token_type_ids : List[Any]=False , use_labels : Tuple=True , vocab_size : List[str]=99 , hidden_size : int=32 , num_hidden_layers : Optional[Any]=5 , num_attention_heads : Dict=4 , intermediate_size : Dict=37 , hidden_act : List[Any]="gelu" , hidden_dropout_prob : Optional[Any]=0.1 , attention_probs_dropout_prob : Dict=0.1 , max_position_embeddings : Optional[Any]=512 , type_vocab_size : List[Any]=16 , type_sequence_label_size : Any=2 , initializer_range : Optional[int]=0.02 , num_labels : str=3 , num_choices : Union[str, Any]=4 , scope : Dict=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length])
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__a = ids_tensor([self.batch_size] , self.num_choices)
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
    def _lowerCamelCase ( self : Dict , config : Optional[Any] , input_ids : int , token_type_ids : str , input_mask : List[Any] , sequence_labels : str , token_labels : List[Any] , choice_labels : int):
'''simple docstring'''
__a = BioGptModel(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
__a = model(__lowerCamelCase , attention_mask=__lowerCamelCase)
__a = model(__lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_biogpt(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)

        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
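# Minimal generation sketch (illustrative only; it mirrors the integration test above
# and assumes network access to the "microsoft/biogpt" checkpoint):
#
#   tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
#   model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
#   inputs = tokenizer("COVID-19 is", return_tensors="pt")
#   output_ids = model.generate(**inputs, max_length=20)
#   print(tokenizer.decode(output_ids[0], skip_special_tokens=True))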
| 701
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
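# Minimal usage sketch (illustrative only; requires network access to the hub):
#
#   tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   ids = tokenizer("Le camembert est délicieux !")["input_ids"]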
| 60
| 0
|
"""simple docstring"""
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """
    Takes a list of possible side lengths and determines whether a
    two-dimensional polygon with such side lengths can exist.
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
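# Illustrative check (added sketch, not part of the original module): the classic
# triangle inequality falls out of check_polygon for three sides.
if __name__ == "__main__":
    assert check_polygon([3, 4, 5])  # 5 < 3 + 4, a valid triangle
    assert not check_polygon([1, 2, 10])  # 10 >= 1 + 2, the sides cannot close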
| 574
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
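# Minimal usage sketch (illustrative only):
#
#   config = XLMRobertaXLConfig()  # defaults mirror facebook/xlm-roberta-xl
#   onnx_config = XLMRobertaXLOnnxConfig(config, task="sequence-classification")
#   print(onnx_config.inputs)  # OrderedDict of dynamic axes for input_ids / attention_mask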
| 574
| 1
|
"""simple docstring"""
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
    """Repeat the key until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message with the generated key (spaces pass through)."""
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher += dict2[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    """Decrypt the cipher text back to the original message."""
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
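# Worked example (illustrative): with message letter "T" (index 19) and key letter
# "S" (index 18), encryption computes (19 - 18) % 26 = 1 -> "B"; decryption then
# recovers (1 + 18 + 26) % 26 = 19 -> "T".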
| 709
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
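# Minimal usage sketch (illustrative only): a template whose source column is
# "content" maps onto the canonical "text" column expected by LM code.
#
#   task = LanguageModeling(text_column="content")
#   assert task.column_mapping == {"content": "text"}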
| 122
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
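# Example invocation (illustrative only; the script filename and local paths below
# are hypothetical placeholders, not taken from the original file):
#
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.pt \
#       --model_dump_path ./converted_model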
| 163
|
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    The Rabin-Karp algorithm for finding a pattern within a piece of text,
    using a rolling hash so each window of the text is compared in O(1).
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
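# Worked example of the rolling hash (illustrative): with alphabet_size = 256 and the
# two-character window "ab", the hash is ord("a") * 256 + ord("b") (mod modulus).
# Sliding to "bc" removes the ord("a") * 256 contribution (the modulus_power term)
# and appends ord("c"):
#
#   new_hash = ((old_hash - ord("a") * 256) * 256 + ord("c")) % modulus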
| 480
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict: bool = True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict: bool = True
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
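# Minimal usage sketch (illustrative only; scheduler state is threaded functionally
# through the API above, as is usual for Flax):
#
#   scheduler = FlaxKarrasVeScheduler()
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50, shape=())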
| 711
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"
    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 592
| 0
|
"""simple docstring"""
def check_bouncy(n: int) -> bool:
    """Return True if the digits of n are neither fully increasing nor fully decreasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number for which the proportion of bouncy numbers reaches percent."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
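# Quick sanity check (added for illustration): 155349 rises then falls, so it is
# bouncy; 1234 (increasing) and 4321 (decreasing) are not.
if __name__ == "__main__":
    assert check_bouncy(155349)
    assert not check_bouncy(1234)
    assert not check_bouncy(4321)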
| 77
|
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 41
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
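# With the lazy module installed in sys.modules, symbols resolve on first access
# (illustrative only):
#
#   from transformers.models.layoutlmv2 import LayoutLMv2Config
#   config = LayoutLMv2Config()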
| 31
|
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string to Ascii85 bytes."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
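# Round-trip sanity check (added for illustration):
if __name__ == "__main__":
    encoded = base85_encode("Hello World!")
    assert base85_decode(encoded) == "Hello World!"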
| 31
| 1
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
a_ = TypeVar("""T""")
class UpperCAmelCase__ ( Generic[T] ):
"""simple docstring"""
def __init__( self: Tuple , __lowerCAmelCase: T ) -> str:
'''simple docstring'''
__UpperCAmelCase = data
__UpperCAmelCase = None
def __str__( self: Union[str, Any] ) -> str:
'''simple docstring'''
return F'''{self.data}'''
class UpperCAmelCase__ ( Generic[T] ):
"""simple docstring"""
def __init__( self: Any ) -> None:
'''simple docstring'''
__UpperCAmelCase = None
def __iter__( self: Dict ) -> Iterator[T]:
'''simple docstring'''
__UpperCAmelCase = self.top
while node:
yield node.data
__UpperCAmelCase = node.next
def __str__( self: Dict ) -> str:
'''simple docstring'''
return "->".join([str(__lowerCAmelCase ) for item in self] )
def __len__( self: str ) -> int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def _UpperCAmelCase ( self: Optional[int] ) -> bool:
'''simple docstring'''
return self.top is None
def _UpperCAmelCase ( self: List[Any] , __lowerCAmelCase: T ) -> None:
'''simple docstring'''
__UpperCAmelCase = Node(__lowerCAmelCase )
if not self.is_empty():
__UpperCAmelCase = self.top
__UpperCAmelCase = node
def _UpperCAmelCase ( self: List[str] ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , __lowerCAmelCase )
__UpperCAmelCase = self.top
__UpperCAmelCase = self.top.next
return pop_node.data
def _UpperCAmelCase ( self: Union[str, Any] ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def _UpperCAmelCase ( self: List[str] ) -> None:
'''simple docstring'''
__UpperCAmelCase = None
if __name__ == "__main__":
from doctest import testmod
testmod()
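# Quick usage sketch (added for illustration):
if __name__ == "__main__":
    stack = LinkedStack[int]()
    stack.push(1)
    stack.push(2)
    assert str(stack) == "2->1"
    assert stack.peek() == 2
    assert stack.pop() == 2
    assert len(stack) == 1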
| 221
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_054)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__UpperCAmelCase = {"input_ids": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase,
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250_004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250_020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250_038)
def _UpperCAmelCase ( self: Any ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase )
def _UpperCAmelCase ( self: str ) -> Any:
'''simple docstring'''
self.assertIn(__lowerCAmelCase , self.tokenizer.all_special_ids )
__UpperCAmelCase = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
__UpperCAmelCase = self.tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
__UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCAmelCase )
def _UpperCAmelCase ( self: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , __lowerCAmelCase )
__UpperCAmelCase = 10
__UpperCAmelCase = self.tokenizer(__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase ).input_ids[0]
self.assertEqual(ids[0] , __lowerCAmelCase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
def _UpperCAmelCase ( self: int ) -> Dict:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250_053, 250_001] )
def _UpperCAmelCase ( self: str ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCAmelCase )
__UpperCAmelCase = MBartaaTokenizer.from_pretrained(__lowerCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCAmelCase )
@require_torch
def _UpperCAmelCase ( self: Any ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , return_tensors="pt" )
__UpperCAmelCase = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
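        # Note on shift_tokens_right (as exercised above): it rotates the final
        # non-pad label token -- the EOS id 2 -- to position 0, so the decoder
        # is fed [eos, lang_code, tokens...]; that is why decoder_input_ids
        # starts with [2, RO_CODE] here.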
@require_torch
def _UpperCAmelCase ( self: Union[str, Any] ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
__UpperCAmelCase = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _UpperCAmelCase ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer(self.src_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=3 , return_tensors="pt" )
__UpperCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=10 , return_tensors="pt" )
__UpperCAmelCase = targets["input_ids"]
__UpperCAmelCase = shift_tokens_right(__lowerCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _UpperCAmelCase ( self: str ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , {
# en_XX, A, test, EOS
"input_ids": [[250_004, 62, 3_034, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250_001,
} , )
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = AutoencoderKL
__snake_case = """sample"""
__snake_case = 1e-2
@property
def _snake_case ( self: int ):
__lowerCamelCase : str = 4
__lowerCamelCase : Union[str, Any] = 3
__lowerCamelCase : List[Any] = (32, 32)
__lowerCamelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(a )
return {"sample": image}
@property
def _snake_case ( self: int ):
return (3, 32, 32)
@property
def _snake_case ( self: List[str] ):
return (3, 32, 32)
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Optional[int] = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
__lowerCamelCase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self: List[Any] ):
pass
def _snake_case ( self: List[Any] ):
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
def _snake_case ( self: List[str] ):
# enable deterministic behavior for gradient checkpointing
__lowerCamelCase : Any = self.prepare_init_args_and_inputs_for_common()
__lowerCamelCase : int = self.model_class(**a )
model.to(a )
assert not model.is_gradient_checkpointing and model.training
__lowerCamelCase : Optional[Any] = model(**a ).sample
        # run the backward pass on the model; for simplicity we use the trivial
        # scalar (out - labels).mean() as the loss instead of a real objective
model.zero_grad()
__lowerCamelCase : Optional[int] = torch.randn_like(a )
__lowerCamelCase : str = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__lowerCamelCase : int = self.model_class(**a )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(a )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__lowerCamelCase : int = model_a(**a ).sample
        # run the backward pass on the checkpointed model with the same trivial
        # scalar loss, (out_a - labels).mean()
model_a.zero_grad()
__lowerCamelCase : str = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
__lowerCamelCase : Optional[int] = dict(model.named_parameters() )
__lowerCamelCase : Union[str, Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
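        # Gradient checkpointing trades compute for memory: activations are
        # recomputed during the backward pass instead of stored, so the loss
        # and every parameter gradient must match the non-checkpointed run up
        # to numerical tolerance -- which is exactly what is asserted above.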
def _snake_case ( self: List[Any] ):
__lowerCamelCase : Optional[int] = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=a )
self.assertIsNotNone(a )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(a )
__lowerCamelCase : Dict = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _snake_case ( self: List[str] ):
__lowerCamelCase : Any = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
__lowerCamelCase : List[Any] = model.to(a )
model.eval()
if torch_device == "mps":
__lowerCamelCase : List[str] = torch.manual_seed(0 )
else:
__lowerCamelCase : Any = torch.Generator(device=a ).manual_seed(0 )
__lowerCamelCase : List[str] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__lowerCamelCase : Optional[int] = image.to(a )
with torch.no_grad():
__lowerCamelCase : Dict = model(a , sample_posterior=a , generator=a ).sample
__lowerCamelCase : List[str] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__lowerCamelCase : Tuple = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
__lowerCamelCase : Any = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
__lowerCamelCase : Union[str, Any] = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(a , a , rtol=1e-2 ) )
@slow
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
def _snake_case ( self: List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fpaa=False):
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fpaa=False):
        revision = "fp16" if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def _snake_case ( self: int , a: str , a: int , a: int ):
__lowerCamelCase : List[str] = self.get_sd_vae_model()
__lowerCamelCase : Dict = self.get_sd_image(a )
__lowerCamelCase : Dict = self.get_generator(a )
with torch.no_grad():
__lowerCamelCase : Optional[Any] = model(a , generator=a , sample_posterior=a ).sample
assert sample.shape == image.shape
__lowerCamelCase : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__lowerCamelCase : int = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(a , a , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self: str , a: List[Any] , a: Any ):
__lowerCamelCase : Optional[Any] = self.get_sd_vae_model(fpaa=a )
__lowerCamelCase : Optional[int] = self.get_sd_image(a , fpaa=a )
__lowerCamelCase : Union[str, Any] = self.get_generator(a )
with torch.no_grad():
__lowerCamelCase : Dict = model(a , generator=a , sample_posterior=a ).sample
assert sample.shape == image.shape
__lowerCamelCase : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__lowerCamelCase : int = torch.tensor(a )
assert torch_all_close(a , a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def _snake_case ( self: str , a: Optional[Any] , a: Dict , a: Optional[Any] ):
__lowerCamelCase : Optional[Any] = self.get_sd_vae_model()
__lowerCamelCase : List[Any] = self.get_sd_image(a )
with torch.no_grad():
__lowerCamelCase : Any = model(a ).sample
assert sample.shape == image.shape
__lowerCamelCase : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__lowerCamelCase : List[Any] = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(a , a , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self: Union[str, Any] , a: List[str] , a: int ):
__lowerCamelCase : str = self.get_sd_vae_model()
__lowerCamelCase : Tuple = self.get_sd_image(a , shape=(3, 4, 64, 64) )
with torch.no_grad():
__lowerCamelCase : Optional[int] = model.decode(a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__lowerCamelCase : Dict = sample[-1, -2:, :2, -2:].flatten().cpu()
__lowerCamelCase : Optional[Any] = torch.tensor(a )
assert torch_all_close(a , a , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self: int , a: str , a: str ):
__lowerCamelCase : Any = self.get_sd_vae_model(fpaa=a )
__lowerCamelCase : str = self.get_sd_image(a , shape=(3, 4, 64, 64) , fpaa=a )
with torch.no_grad():
__lowerCamelCase : Tuple = model.decode(a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__lowerCamelCase : Optional[int] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__lowerCamelCase : List[Any] = torch.tensor(a )
assert torch_all_close(a , a , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def _snake_case ( self: Any , a: List[Any] ):
__lowerCamelCase : List[Any] = self.get_sd_vae_model(fpaa=a )
__lowerCamelCase : int = self.get_sd_image(a , shape=(3, 4, 64, 64) , fpaa=a )
with torch.no_grad():
__lowerCamelCase : List[Any] = model.decode(a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__lowerCamelCase : Optional[Any] = model.decode(a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(a , a , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def _snake_case ( self: int , a: Union[str, Any] ):
__lowerCamelCase : str = self.get_sd_vae_model()
__lowerCamelCase : str = self.get_sd_image(a , shape=(3, 4, 64, 64) )
with torch.no_grad():
__lowerCamelCase : Tuple = model.decode(a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__lowerCamelCase : List[Any] = model.decode(a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(a , a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def _snake_case ( self: Any , a: Optional[Any] , a: int ):
__lowerCamelCase : int = self.get_sd_vae_model()
__lowerCamelCase : int = self.get_sd_image(a )
__lowerCamelCase : Any = self.get_generator(a )
with torch.no_grad():
__lowerCamelCase : Tuple = model.encode(a ).latent_dist
__lowerCamelCase : List[str] = dist.sample(generator=a )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__lowerCamelCase : Optional[int] = sample[0, -1, -3:, -3:].flatten().cpu()
__lowerCamelCase : int = torch.tensor(a )
__lowerCamelCase : int = 3e-3 if torch_device != 'mps' else 1e-2
assert torch_all_close(a , a , atol=a )
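# For reference: the Stable Diffusion VAE exercised above maps 3xHxW images to
# 4x(H/8)x(W/8) latents, which is why the encode test asserts a latent shape of
# [batch, 4] + [i // 8 for i in image.shape[2:]].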
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
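# The suite above is differential (oracle) testing: each operation sequence is
# replayed against both HashMap and a built-in dict, with _run_operation folding
# a return value or a raised exception into a comparable pair, e.g.
#     _run_operation({}, *_set("k", "v"))  # -> (None, None): setitem returns None
#     _run_operation({}, *_get("k"))       # -> (None, KeyError('k'))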
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
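

# Quick worked example (added for illustration):
#     text_justification("This is an example of text justification.", 16)
# returns three lines of exactly 16 characters each:
#     ['This    is    an', 'example  of text', 'justification.  ']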
if __name__ == "__main__":
from doctest import testmod
testmod()
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
lowerCamelCase__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
lowerCamelCase__ = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
lowerCamelCase__ = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
lowerCamelCase__ = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
    ('''image-to-text''', '''MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def A(__a: List[Any] ):
lowerCAmelCase_ = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , __a )
return [m.group(0 ) for m in matches]
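# For example, the regex above splits "TFAutoModelForCausalLM" into
# ["TF", "Auto", "Model", "For", "Causal", "LM"]; stripping the last chunk
# repeatedly lets the backend lookup below fall back from a full class name to
# a known model prefix.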
def A():
lowerCAmelCase_ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowerCAmelCase_ = {
config.replace("Config" , "" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
lowerCAmelCase_ = collections.defaultdict(__a )
lowerCAmelCase_ = collections.defaultdict(__a )
lowerCAmelCase_ = collections.defaultdict(__a )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(__a ):
lowerCAmelCase_ = None
if _re_tf_models.match(__a ) is not None:
lowerCAmelCase_ = tf_models
lowerCAmelCase_ = _re_tf_models.match(__a ).groups()[0]
elif _re_flax_models.match(__a ) is not None:
lowerCAmelCase_ = flax_models
lowerCAmelCase_ = _re_flax_models.match(__a ).groups()[0]
elif _re_pt_models.match(__a ) is not None:
lowerCAmelCase_ = pt_models
lowerCAmelCase_ = _re_pt_models.match(__a ).groups()[0]
if lookup_dict is not None:
while len(__a ) > 0:
if attr_name in model_prefix_to_model_type:
lowerCAmelCase_ = True
break
# Try again after removing the last word in the name
lowerCAmelCase_ = "".join(camel_case_split(__a )[:-1] )
lowerCAmelCase_ = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
lowerCAmelCase_ = list(__a )
all_models.sort()
lowerCAmelCase_ = {"model_type": all_models}
lowerCAmelCase_ = [pt_models[t] for t in all_models]
lowerCAmelCase_ = [tf_models[t] for t in all_models]
lowerCAmelCase_ = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
lowerCAmelCase_ = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
lowerCAmelCase_ = "AutoProcessor"
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
lowerCAmelCase_ = "AutoTokenizer"
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
lowerCAmelCase_ = "AutoFeatureExtractor"
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
lowerCAmelCase_ = "AutoTokenizer"
lowerCAmelCase_ = [processors[t] for t in all_models]
return pd.DataFrame(__a )
def A(__a: List[str] ):
lowerCAmelCase_ = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
lowerCAmelCase_ = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
        lowerCAmelCase_ = [auto_class, F"TF{auto_class}", F"Flax{auto_class}"]
# Loop through all three frameworks
for module, cls, mapping in zip(__a , __a , __a ):
# The type of pipeline may not exist in this framework
if not hasattr(__a , __a ):
continue
# First extract all model_names
lowerCAmelCase_ = []
for name in getattr(__a , __a ).values():
if isinstance(__a , __a ):
model_names.append(__a )
else:
model_names.extend(list(__a ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def A(__a: Tuple , __a: int ):
lowerCAmelCase_ = get_frameworks_table()
lowerCAmelCase_ = Dataset.from_pandas(__a )
lowerCAmelCase_ = hf_hub_download(
"huggingface/transformers-metadata" , "pipeline_tags.json" , repo_type="dataset" , token=__a )
lowerCAmelCase_ = Dataset.from_json(__a )
lowerCAmelCase_ = {
tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
for i in range(len(__a ) )
}
lowerCAmelCase_ = update_pipeline_and_auto_class_table(__a )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
lowerCAmelCase_ = sorted(table.keys() )
lowerCAmelCase_ = pd.DataFrame(
{
"model_class": model_classes,
"pipeline_tag": [table[m][0] for m in model_classes],
"auto_class": [table[m][1] for m in model_classes],
} )
lowerCAmelCase_ = Dataset.from_pandas(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(__a , "frameworks.json" ) )
tags_dataset.to_json(os.path.join(__a , "pipeline_tags.json" ) )
if commit_sha is not None:
lowerCAmelCase_ = (
F"Update with commit {commit_sha}\n\nSee: "
F"https://github.com/huggingface/transformers/commit/{commit_sha}"
)
else:
lowerCAmelCase_ = "Update"
upload_folder(
repo_id="huggingface/transformers-metadata" , folder_path=__a , repo_type="dataset" , token=__a , commit_message=__a , )
def A():
lowerCAmelCase_ = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
lowerCAmelCase_ = transformers_module.pipelines.SUPPORTED_TASKS
lowerCAmelCase_ = []
for key in pipeline_tasks:
if key not in in_table:
lowerCAmelCase_ = pipeline_tasks[key]["pt"]
if isinstance(__a , (list, tuple) ):
lowerCAmelCase_ = model[0]
lowerCAmelCase_ = model.__name__
if model not in in_table.values():
missing.append(__a )
if len(__a ) > 0:
lowerCAmelCase_ = ", ".join(__a )
raise ValueError(
"The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
F"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
lowerCamelCase__ = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
snake_case__ : Union[str, Any] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' ,safety_checker=__lowercase ,cache_dir=__lowercase )
snake_case__ : Tuple = [t[-1] for t in os.walk(os.path.join(__lowercase ,os.listdir(__lowercase )[0] ,'''snapshots''' ) )]
snake_case__ : Any = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ , snake_case__ : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' ,safety_checker=__lowercase )
snake_case__ : int = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Tuple = jax.random.PRNGKey(0 )
snake_case__ : int = 4
snake_case__ : Optional[int] = jax.device_count()
snake_case__ : List[Any] = num_samples * [prompt]
snake_case__ : Tuple = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : str = replicate(__lowercase )
snake_case__ : List[Any] = jax.random.split(__lowercase ,__lowercase )
snake_case__ : List[str] = shard(__lowercase )
snake_case__ : str = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 4.151_4745 ) < 1e-3
assert np.abs(np.abs(__lowercase ,dtype=np.floataa ).sum() - 4_9947.875 ) < 5e-1
snake_case__ : Optional[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__lowercase ) == num_samples
def __lowerCamelCase ( self :Tuple ):
snake_case__ , snake_case__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''flax''' ,safety_checker=__lowercase )
snake_case__ : Tuple = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Union[str, Any] = jax.random.PRNGKey(0 )
snake_case__ : List[Any] = 5_0
snake_case__ : Union[str, Any] = jax.device_count()
snake_case__ : Optional[int] = num_samples * [prompt]
snake_case__ : Any = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : str = replicate(__lowercase )
snake_case__ : Optional[int] = jax.random.split(__lowercase ,__lowercase )
snake_case__ : Dict = shard(__lowercase )
snake_case__ : str = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0565_2401) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 238_3808.2) ) < 5e-1
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ , snake_case__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=__lowercase )
snake_case__ : Tuple = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Dict = jax.random.PRNGKey(0 )
snake_case__ : Any = 5_0
snake_case__ : Tuple = jax.device_count()
snake_case__ : str = num_samples * [prompt]
snake_case__ : int = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : Dict = replicate(__lowercase )
snake_case__ : Union[str, Any] = jax.random.split(__lowercase ,__lowercase )
snake_case__ : Tuple = shard(__lowercase )
snake_case__ : Optional[Any] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0400_3906) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 237_3516.75) ) < 5e-1
def __lowerCamelCase ( self :str ):
snake_case__ , snake_case__ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa )
snake_case__ : Tuple = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : int = jax.random.PRNGKey(0 )
snake_case__ : Optional[int] = 5_0
snake_case__ : List[str] = jax.device_count()
snake_case__ : int = num_samples * [prompt]
snake_case__ : List[str] = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : Optional[int] = replicate(__lowercase )
snake_case__ : str = jax.random.split(__lowercase ,__lowercase )
snake_case__ : Dict = shard(__lowercase )
snake_case__ : Union[str, Any] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0400_3906) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 237_3516.75) ) < 5e-1
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Union[str, Any] = FlaxDDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,set_alpha_to_one=__lowercase ,steps_offset=1 ,)
snake_case__ , snake_case__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,scheduler=__lowercase ,safety_checker=__lowercase ,)
snake_case__ : Dict = scheduler.create_state()
snake_case__ : List[str] = scheduler_state
snake_case__ : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Tuple = jax.random.PRNGKey(0 )
snake_case__ : int = 5_0
snake_case__ : int = jax.device_count()
snake_case__ : List[Any] = num_samples * [prompt]
snake_case__ : Tuple = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : Tuple = replicate(__lowercase )
snake_case__ : Dict = jax.random.split(__lowercase ,__lowercase )
snake_case__ : Optional[Any] = shard(__lowercase )
snake_case__ : List[Any] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 234_7693.5) ) < 5e-1
def __lowerCamelCase ( self :Tuple ):
snake_case__ : int = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Dict = jax.device_count()
snake_case__ : List[str] = num_samples * [prompt]
snake_case__ : Tuple = jax.random.split(jax.random.PRNGKey(0 ) ,__lowercase )
snake_case__ , snake_case__ : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=__lowercase ,)
snake_case__ : Union[str, Any] = replicate(__lowercase )
snake_case__ : Union[str, Any] = pipeline.prepare_inputs(__lowercase )
snake_case__ : str = shard(__lowercase )
snake_case__ : Optional[Any] = pipeline(__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
snake_case__ : List[Any] = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
snake_case__ , snake_case__ : int = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=__lowercase ,use_memory_efficient_attention=__lowercase ,)
snake_case__ : Optional[Any] = replicate(__lowercase )
snake_case__ : List[Any] = pipeline.prepare_inputs(__lowercase )
snake_case__ : List[str] = shard(__lowercase )
snake_case__ : Tuple = pipeline(__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
snake_case__ : Optional[int] = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : str = ShapEPipeline
__lowerCAmelCase : Union[str, Any] = ["""prompt"""]
__lowerCAmelCase : Union[str, Any] = ["""prompt"""]
__lowerCAmelCase : Tuple = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
__lowerCAmelCase : Optional[Any] = False
@property
def __lowerCamelCase ( self :Dict ):
return 3_2
@property
def __lowerCamelCase ( self :str ):
return 3_2
@property
def __lowerCamelCase ( self :Optional[int] ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self :int ):
return 8
@property
def __lowerCamelCase ( self :int ):
snake_case__ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __lowerCamelCase ( self :int ):
torch.manual_seed(0 )
snake_case__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,)
return CLIPTextModelWithProjection(__lowercase )
@property
def __lowerCamelCase ( self :str ):
torch.manual_seed(0 )
snake_case__ : Optional[Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
snake_case__ : Optional[int] = PriorTransformer(**__lowercase )
return model
@property
def __lowerCamelCase ( self :Optional[int] ):
torch.manual_seed(0 )
snake_case__ : Dict = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
snake_case__ : List[str] = ShapERenderer(**__lowercase )
return model
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : str = self.dummy_prior
snake_case__ : Optional[Any] = self.dummy_text_encoder
snake_case__ : List[Any] = self.dummy_tokenizer
snake_case__ : Optional[int] = self.dummy_renderer
snake_case__ : str = HeunDiscreteScheduler(
beta_schedule='''exp''' ,num_train_timesteps=1_0_2_4 ,prediction_type='''sample''' ,use_karras_sigmas=__lowercase ,clip_sample=__lowercase ,clip_sample_range=1.0 ,)
snake_case__ : Tuple = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 3_2,
            '''output_type''': '''np''',
        }
        return inputs
def __lowerCamelCase ( self :Tuple ):
snake_case__ : str = '''cpu'''
snake_case__ : str = self.get_dummy_components()
snake_case__ : Optional[Any] = self.pipeline_class(**__lowercase )
snake_case__ : Tuple = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Tuple = pipe(**self.get_dummy_inputs(__lowercase ) )
snake_case__ : Dict = output.images[0]
snake_case__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
snake_case__ : Tuple = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCamelCase ( self :Any ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Optional[int] = torch_device == '''cpu'''
snake_case__ : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=__lowercase ,relax_max_difference=__lowercase ,)
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : List[Any] = self.get_dummy_components()
snake_case__ : Any = self.pipeline_class(**__lowercase )
snake_case__ : str = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Any = 1
snake_case__ : str = 2
snake_case__ : Any = self.get_dummy_inputs(__lowercase )
for key in inputs.keys():
if key in self.batch_params:
snake_case__ : Optional[Any] = batch_size * [inputs[key]]
snake_case__ : Any = pipe(**__lowercase ,num_images_per_prompt=__lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
snake_case__ : Optional[int] = ShapEPipeline.from_pretrained('''openai/shap-e''' )
snake_case__ : Any = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(0 )
snake_case__ : Tuple = pipe(
'''a shark''' ,generator=__lowercase ,guidance_scale=15.0 ,num_inference_steps=6_4 ,frame_size=6_4 ,output_type='''np''' ,).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(__lowercase ,__lowercase )
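# Note (my reading of the asserted shapes, not from the original file): Shap-E
# decodes each prompt into a 3D asset and renders it from a ring of camera
# angles; the leading 20 in the image shapes is the number of rendered frames
# per prompt, each at frame_size x frame_size resolution.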
"""GPT-NeoX model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"
    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
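
# Example of a `rope_scaling` value that passes the validation above
# (illustrative): GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})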
"""Non-preemptive shortest-job-first scheduling: per-process waiting and turnaround times."""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed, a process whose arrival time has
    # passed and which has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is then executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Calculate the turnaround time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
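

# Quick sanity check (worked by hand, matching the demo below): with burst
# times [2, 5, 3, 7] and all arrival times 0, the shortest job runs first, so
# waiting times come out as [0, 5, 2, 10] (mean 4.25) and turnaround times as
# [2, 10, 5, 17] (mean 8.5).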
if __name__ == "__main__":
print("""[TEST CASE 01]""")
__UpperCAmelCase = 4
__UpperCAmelCase = [2, 5, 3, 7]
__UpperCAmelCase = [0, 0, 0, 0]
__UpperCAmelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__UpperCAmelCase = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]
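
# Usage sketch (assuming this script lives at utils/check_build.py):
#     python utils/check_build.py              # check build/lib/transformers
#     python utils/check_build.py --check_lib  # check the installed package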
def test_custom_files_are_present(transformers_path):
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
lowercase : Optional[Any] = parser.parse_args()
if args.check_lib:
lowercase : List[Any] = importlib.import_module("""transformers""")
lowercase : List[str] = Path(transformers_module.__file__).parent
else:
lowercase : int = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)
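

# Behavior notes (added for clarity): FixedPriorityQueue always serves the
# lowest priority number first (0 before 1 before 2), FIFO within a priority;
# ElementPriorityQueue serves the smallest element regardless of insertion
# order. In each demo below, 9 items are enqueued but dequeue() is called 10
# times, so the final call raises UnderFlowError.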
def UpperCAmelCase_ ( ):
lowerCamelCase_: Union[str, Any] = FixedPriorityQueue()
fpq.enqueue(0 , 1_0 )
fpq.enqueue(1 , 7_0 )
fpq.enqueue(0 , 1_0_0 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 6_4 )
fpq.enqueue(0 , 1_2_8 )
print(_UpperCAmelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_UpperCAmelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def UpperCAmelCase_ ( ):
lowerCamelCase_: str = ElementPriorityQueue()
epq.enqueue(1_0 )
epq.enqueue(7_0 )
epq.enqueue(1_0_0 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(6_4 )
epq.enqueue(1_2_8 )
print(_UpperCAmelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_UpperCAmelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
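# Design note: `ElementPriorityQueue.dequeue` is O(n) because of `min` + `remove`.
# A heap keeps the same "smallest element first" behavior in O(log n); a minimal
# sketch (not part of the original module):
#
#   import heapq
#   heap: list[int] = []
#   heapq.heappush(heap, 70)
#   heapq.heappush(heap, 10)
#   heapq.heappop(heap)  # -> 10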
| 584
| 0
|
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first non-negative int found in the `env_keys` env vars, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
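# Minimal usage sketch (the env var names below are illustrative, not real keys):
#   os.environ["MY_FLAG"] = "1"
#   parse_flag_from_env("MY_FLAG")                # -> True
#   get_int_from_env(["WORLD_SIZE", "NPROC"], 1)  # -> first non-negative int found, else 1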
| 45
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    """
    Combines a LayoutLMv3 image processor (optionally running OCR) and a
    LayoutLMv3 tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
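# Minimal usage sketch (standard Hub checkpoint; `image` is assumed to be a PIL.Image):
#   from transformers import LayoutLMv3Processor
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#   sorted(encoding.keys())  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']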
| 191
| 0
|
def split(string: str, separator: str = " ") -> list:
    """Split ``string`` on ``separator`` without using ``str.split``."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
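# Example: split("apple#banana#cherry", "#") -> ['apple', 'banana', 'cherry']
# Note the edge case kept from the original: a trailing separator is dropped
# rather than producing a final empty string.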
if __name__ == "__main__":
from doctest import testmod
testmod()
| 319
|
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """
    Constructs a MarkupLM feature extractor: extracts the text nodes and their
    xpaths from HTML strings using Beautiful Soup.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
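# Minimal usage sketch (requires `bs4` to be installed):
#   fe = MarkupLMFeatureExtractor()
#   out = fe("<html><body><p>Hello</p></body></html>")
#   out["nodes"]   # [['Hello']]
#   out["xpaths"]  # [['/html/body/p']]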
| 319
| 1
|
def compute_ap(l):  # noqa: E741
    """Find and print the articulation points of the undirected graph `l` (adjacency list)."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the root of a DFS tree is an articulation point only if it has more than one out edge
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
_snake_case = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
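# For the adjacency list above this prints the articulation points 2, 3 and 5:
# removing 2 splits off {0, 1}, removing 3 isolates 4, and removing 5 splits
# off {6, 7, 8}.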
| 500
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_1 = feat_extract_first.mel_filters
            mel_2 = feat_extract_second.mel_filters
            self.assertTrue(np.allclose(mel_1, mel_2))
            self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_1 = feat_extract_first.mel_filters
            mel_2 = feat_extract_second.mel_filters
            self.assertTrue(np.allclose(mel_1, mel_2))
            self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
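# Context for the (1, 80, 3000) assertion in test_integration: Whisper pads/trims
# audio to 30 s at 16 kHz (480000 samples) and computes an 80-bin log-mel
# spectrogram with hop length 160, giving 480000 / 160 = 3000 frames.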
| 539
| 0
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError is raised for an unsupported output stage
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0  # NaN is the only value that is not equal to itself
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overriding as the returned hidden states are in (batch, height * width, channels) format
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 93
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__lowercase : int = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case, snake_case, snake_case, snake_case, snake_case=False, ):
output_path.parent.mkdir(parents=snake_case, exist_ok=snake_case)
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
snake_case, snake_case, f=output_path.as_posix(), input_names=snake_case, output_names=snake_case, dynamic_axes=snake_case, do_constant_folding=snake_case, use_external_data_format=snake_case, enable_onnx_checker=snake_case, opset_version=snake_case, )
else:
export(
snake_case, snake_case, f=output_path.as_posix(), input_names=snake_case, output_names=snake_case, dynamic_axes=snake_case, do_constant_folding=snake_case, opset_version=snake_case, )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case = False):
__snake_case = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__snake_case = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''')
else:
__snake_case = '''cpu'''
__snake_case = Path(snake_case)
# VAE DECODER
__snake_case = AutoencoderKL.from_pretrained(model_path + '''/vae''')
__snake_case = vae_decoder.config.latent_channels
# forward only through the decoder part
__snake_case = vae_decoder.decode
onnx_export(
snake_case, model_args=(
torch.randn(1, snake_case, 25, 25).to(device=snake_case, dtype=snake_case),
False,
), output_path=output_path / '''vae_decoder''' / '''model.onnx''', ordered_input_names=['''latent_sample''', '''return_dict'''], output_names=['''sample'''], dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
}, opset=snake_case, )
del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 93
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for the BiT (Big Transfer) backbone."""

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
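# Minimal usage sketch:
#   config = BitConfig(layer_type="bottleneck", global_padding="same")
#   config.global_padding  # 'SAME' (normalized to upper case in __init__)
#   config.stage_names     # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']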
| 698
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    """Configuration class for the LUKE model."""

    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 698
| 1
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = (
            f"--nproc_per_node=2 --master_port={get_torch_dist_unique_port()}"
            f" {self.test_file_dir}/test_trainer_distributed.py"
        ).split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = (
            f"--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()}"
            f" {self.test_file_dir}/test_trainer_distributed.py"
        ).split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 647
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 647
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
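# Added illustration (hypothetical ids, not from the original file): how the two
# helpers above lay out a BERT-style sentence pair and its segment ids.
def _demo_pair_layout() -> None:
    cls_id, sep_id = 101, 102  # assumed ids for [CLS]/[SEP]
    token_ids_0, token_ids_1 = [7, 8], [9]
    input_ids = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
    # segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP]
    token_type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)
    assert len(input_ids) == len(token_type_ids)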
| 411
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
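# Added sketch: a stripped-down toy version of the _LazyModule idea used above —
# defer importing a submodule until one of its exported names is first accessed.
# (Illustrative only; the real implementation lives in transformers.utils.)
import importlib

class _TinyLazyModule:
    def __init__(self, name: str, import_structure: dict):
        self._name = name
        # map every exported attribute back to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        module = importlib.import_module(f"{self._name}.{self._attr_to_module[attr]}")
        return getattr(module, attr)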
| 411
| 1
|
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self
def __repr__( self ) -> str:
return pformat(self.adj_list )
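# Added usage sketch for the class above.
if __name__ == "__main__":
    graph = GraphAdjacencyList[int]()
    graph.add_edge(0, 1).add_edge(1, 2).add_edge(2, 0)
    print(graph)  # directed: {0: [1], 1: [2], 2: [0]}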
| 645
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    def __init__(self, load_in_8bit=False, load_in_4bit=False, llm_int8_threshold=6.0, llm_int8_skip_modules=None, llm_int8_enable_fp32_cpu_offload=False, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=None, bnb_4bit_quant_type="fp4", bnb_4bit_use_double_quant=False, **kwargs):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype')

        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError('llm_int8_threshold must be a float')
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError('llm_int8_skip_modules must be a list of strings')
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean')
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean')
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype')
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError('bnb_4bit_quant_type must be a string')
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean')
        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes')) >= version.parse(
            '0.39.0'):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version')

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        with open(json_file_path, 'w', encoding='utf-8') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '\n'
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype']).split('.')[1]
        return output

    def __repr__(self):
        return f"""{self.__class__.__name__} {self.to_json_string()}"""

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
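# Added usage sketch (assumes torch and bitsandbytes>=0.39.0 are installed): a typical
# 4-bit NF4 setup; the config is meant to be passed to `from_pretrained(..., quantization_config=...)`.
if __name__ == "__main__" and is_torch_available():
    _nf4_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype="bfloat16",  # resolved to torch.bfloat16 in __init__
    )
    assert _nf4_config.quantization_method() == "nf4"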
| 645
| 1
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class OwlViTTextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "owlvit_text_model"

    def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''') == "owlvit":
            config_dict = config_dict['''text_config''']

        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "owlvit_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''') == "owlvit":
            config_dict = config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "owlvit"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''')

        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''')

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict['''text_config'''] = text_config
        config_dict['''vision_config'''] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
            ] )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''logits_per_image''', {0: '''batch'''}),
                ('''logits_per_text''', {0: '''batch'''}),
                ('''text_embeds''', {0: '''batch'''}),
                ('''image_embeds''', {0: '''batch'''}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, framework: Optional["TensorType"] = None) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
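# Added usage sketch: composing a full config from explicit sub-configs via the
# `from_text_vision_configs` helper defined above.
if __name__ == "__main__":
    _text_cfg = OwlViTTextConfig(hidden_size=512)
    _vision_cfg = OwlViTVisionConfig(hidden_size=768)
    _cfg = OwlViTConfig.from_text_vision_configs(_text_cfg.to_dict(), _vision_cfg.to_dict())
    assert _cfg.text_config.hidden_size == 512 and _cfg.vision_config.hidden_size == 768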
| 14
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self):
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs):
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('''_''' * 50 + '''\n''')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'Process {process_number + 1} is executing.')
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        '''Updated available resource stack for processes: '''
                        + ''' '''.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('''The process is in a safe state.\n''')
            else:
                print('''System in unsafe state. Aborting...\n''')
                break

    def __pretty_data(self):
        print(''' ''' * 9 + '''Allocated Resource Table''')
        for item in self.__allocated_resources_table:
            print(
                f'P{self.__allocated_resources_table.index(item) + 1}'
                + ''' '''.join(f'{it:>8}' for it in item)
                + '''\n''')
        print(''' ''' * 9 + '''System Resource Table''')
        for item in self.__maximum_claim_table:
            print(
                f'P{self.__maximum_claim_table.index(item) + 1}'
                + ''' '''.join(f'{it:>8}' for it in item)
                + '''\n''')
        print(
            '''Current Usage by Active Processes: '''
            + ''' '''.join(str(x) for x in self.__claim_vector))
        print(
            '''Initial Available Resources: '''
            + ''' '''.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
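# Added minimal sketch: the core Banker's safety test from `main` above, written as a
# pure function — repeatedly run any process whose remaining need fits in the free pool.
def is_safe_state(available: list, need: list, allocation: list) -> bool:
    work = list(available)
    finished = [False] * len(need)
    progressed = True
    while progressed:
        progressed = False
        for i, row in enumerate(need):
            if not finished[i] and all(n <= w for n, w in zip(row, work)):
                # process i can finish; release its allocation back to the pool
                work = [w + a for w, a in zip(work, allocation[i])]
                finished[i] = True
                progressed = True
    return all(finished)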
| 72
| 0
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                F"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
                F""" version {__version__} is >= {version_name}""")

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = F"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = F"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
        elif deprecated_kwargs is None:
            warning = F"""`{attribute}` is deprecated and will be removed in version {version_name}."""

        if warning is not None:
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(F"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
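# Added usage sketch (illustrative names, not from the original file): a caller that
# migrates a deprecated keyword argument through `deprecate`. The far-future version
# string keeps the removal check from firing.
def _resize(image, size=None, **kwargs):
    old_size = deprecate("w", "99.0.0", "Use `size` instead.", take_from=kwargs, standard_warn=False)
    if old_size is not None:
        size = old_size
    return size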
| 706
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(R'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]Unicode €.[SEP]')

        encoded = tokenizer('e è é ê ë')
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]e è é ê ë[SEP]')

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), '[CLS]e è é ê ë[SEP]')
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets['input_ids'].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [F"""<extra_id_{i}>""" for i in range(125)]

                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(
                    ['an_additional_special_token'],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])), )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])), )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), '�')

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
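# Added illustration: Perceiver tokenization is plain UTF-8 bytes shifted by the
# number of special tokens (6), with [CLS]=4 and [SEP]=5 — exactly the integration
# ids checked in test_multibytes_char above.
def _perceiver_like_ids(text: str) -> list:
    return [4] + [b + 6 for b in text.encode("utf-8")] + [5]

assert _perceiver_like_ids("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]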
| 679
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_mobilenet_v2'] = ['MobileNetV2FeatureExtractor']
    _import_structure['image_processing_mobilenet_v2'] = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilenet_v2'] = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 648
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], """<s>""")
        self.assertEqual(vocab_keys[1], """<pad>""")
        self.assertEqual(vocab_keys[-1], """j""")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386], )

        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""], )
@slow
    def test_tokenizer_integration(self):
# fmt: off
snake_case__ : Union[str, Any] = {"""input_ids""": [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case__, model_name="""facebook/s2t-small-mustc-en-de-st""", revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""", )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    """simple docstring"""

    checkpoint_name = 'valhalla/s2t_mustc_multilinguial_medium'
    french_text = 'C\'est trop cool'
    spanish_text = 'Esto es genial'

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["""pt"""], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["""ru"""], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["""it"""], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["""de"""], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = """fr"""
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = """fr"""
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = """es"""
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
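# Added illustration (hypothetical helper, not from the original file): the layout the
# multilingual tests above rely on — a language-code id is prepended and EOS appended
# around the raw text ids.
def _expected_multilingual_layout(lang_code_id: int, text_ids: list, eos_id: int) -> list:
    return [lang_code_id] + text_ids + [eos_id]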
| 648
| 1
|
def solution(power: int = 1000) -> int:
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("""Enter the power of 2: """).strip())
    print("""2 ^ """, power, """ = """, 2**power)
    result = solution(power)
    print("""Sum of the digits is: """, result)
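# Added note: the same digit sum as a one-liner; the loop above is kept for readability.
def solution_oneliner(power: int = 1000) -> int:
    return sum(int(digit) for digit in str(2**power))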
| 308
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 308
| 1
|
from __future__ import annotations

import cv2
import numpy as np


class HarrisCorner:
    '''simple docstring'''

    def __init__(self, k: float, window_size: int) -> None:
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''')

    def __str__(self) -> str:
        """simple docstring"""
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """simple docstring"""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
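# Added note: the score computed above is the standard Harris corner response
#   R = det(M) - k * trace(M)**2,  M = [[Ixx, Ixy], [Ixy, Iyy]] summed over the window,
# with corners kept where R exceeds a threshold. A vectorized sketch, assuming
# ixx/iyy/ixy have already been summed per window:
def harris_response(ixx, iyy, ixy, k: float = 0.04):
    det = ixx * iyy - ixy**2
    trace = ixx + iyy
    return det - k * trace**2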
| 393
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    '''simple docstring'''

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()})
    _type: str = field(default="""Image""", init=False, repr=False)
def __call__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """simple docstring"""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support encoding images, please install \'Pillow\'.''')

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('''path''')}
        elif value.get('''bytes''') is not None or value.get('''path''') is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
        else:
            raise ValueError(
                F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''')
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """simple docstring"""
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''')

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support decoding images, please install \'Pillow\'.''')

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value['''path'''], value['''bytes''']
        if bytes_ is None:
            if path is None:
                raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''')
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split('''::''')[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)['''repo_id''']
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, '''rb''', use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """simple docstring"""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ['''bytes''', '''path'''], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ['''bytes''', '''path'''], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('''bytes''') >= 0:
                bytes_array = storage.field('''bytes''')
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index('''path''') >= 0:
                path_array = storage.field('''path''')
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ['''bytes''', '''path'''], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))['''bytes'''] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ['''bytes''', '''path'''], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """simple docstring"""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, '''rb''') as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('''path''').to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ['''bytes''', '''path'''], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''')

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    '''simple docstring'''
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
    image.save(buffer, format=format)
    return buffer.getvalue()
def UpperCAmelCase ( UpperCAmelCase )-> dict:
'''simple docstring'''
if hasattr(UpperCAmelCase ,'''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(UpperCAmelCase )}
def UpperCAmelCase ( UpperCAmelCase )-> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
SCREAMING_SNAKE_CASE_ = array.dtype
SCREAMING_SNAKE_CASE_ = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
SCREAMING_SNAKE_CASE_ = dtype.kind
SCREAMING_SNAKE_CASE_ = dtype.itemsize
SCREAMING_SNAKE_CASE_ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
SCREAMING_SNAKE_CASE_ = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
SCREAMING_SNAKE_CASE_ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
SCREAMING_SNAKE_CASE_ = dtype_byteorder + dtype_kind + str(UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = np.dtype(UpperCAmelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
SCREAMING_SNAKE_CASE_ = PIL.Image.fromarray(array.astype(UpperCAmelCase ) )
return {"path": None, "bytes": image_to_bytes(UpperCAmelCase )}
def UpperCAmelCase ( UpperCAmelCase )-> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = first_non_null_value(UpperCAmelCase )
if isinstance(UpperCAmelCase ,UpperCAmelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(UpperCAmelCase ,np.ndarray ):
SCREAMING_SNAKE_CASE_ = no_op_if_value_is_null(UpperCAmelCase )
return [obj_to_image_dict_func(UpperCAmelCase ) for obj in objs]
elif isinstance(UpperCAmelCase ,PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = no_op_if_value_is_null(UpperCAmelCase )
return [obj_to_image_dict_func(UpperCAmelCase ) for obj in objs]
else:
return objs
else:
return objs
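

# Usage sketch for the encoders above (illustrative addition; assumes Pillow and numpy
# are installed and this module is run directly):
if __name__ == "__main__":
    _arr = np.zeros((4, 4, 3), dtype=np.uint8)
    _encoded = encode_np_array(_arr)  # a 3-channel uint8 array is saved as PNG bytes
    assert _encoded["path"] is None and isinstance(_encoded["bytes"], bytes)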
| 393
| 1
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
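

# Illustrative effect of the remapping above, taken from the `meta_path` entries:
#     "down.0.block.*" -> "down_blocks.0.resnets.*"
#     "mid.block_1.*"  -> "mid_block.resnets.0.*"
#     "mid.attn_1.*"   -> "mid_block.attentions.0.*"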
def vae_pt_to_vae_diffuser(checkpoint_path, output_path):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to dump the converted VAE to.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 714
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums):  # descriptive name reconstructed; the original identifier was garbled
    """Return the maximum sum over subsequences of `nums` in which no two elements are adjacent."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
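

# Quick worked example (illustrative addition): the optimal non-adjacent picks in
# [3, 2, 7, 10] are 3 and 10, and the empty list sums to 0.
assert maximum_non_adjacent_sum([3, 2, 7, 10]) == 13
assert maximum_non_adjacent_sum([]) == 0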
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53
| 0
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]])

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
| 357
|
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps,
    embedding_dim,
    freq_shift=1,
    min_timescale=1,
    max_timescale=1.0e4,
    flip_sin_to_cos=False,
    scale=1.0,
):
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
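

# Quick shape check (illustrative addition; assumes jax is installed): embedding two
# timesteps into an 8-dimensional sinusoidal signal yields a (2, 8) array.
assert get_sinusoidal_embeddings(jnp.arange(2, dtype=jnp.float32), embedding_dim=8).shape == (2, 8)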
class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
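

# Minimal usage sketch (illustrative addition; assumes flax is installed). The module
# holds no learned parameters, so it can be applied with an empty variables dict:
#     emb = FlaxTimesteps(dim=8).apply({}, jnp.array([1.0, 2.0]))  # -> shape (2, 8)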
| 357
| 1
|
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
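# Worked example (illustrative): for the infix expression "a+b*c",
# infix_2_postfix("a+b*c") returns "abc*+" (printing its trace table along the way),
# and infix_2_prefix("a+b*c") returns "+a*bc".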
| 592
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    """SentencePiece-based BARThez tokenizer (class name reconstructed from the pretrained maps above)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
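

# Usage sketch (illustrative; requires network access to fetch a checkpoint):
#     tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     tokenizer("Le soleil brille.").input_ids  # starts with cls id 0 and ends with sep id 2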
| 592
| 1
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None):
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
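

# Usage sketch (illustrative): the trainer is built like a regular `Trainer`, with the
# model config and data args passed in addition to the usual keyword arguments, e.g.
#     trainer = Seq2SeqTrainer(config=model.config, data_args=data_args, model=model, args=training_args)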
| 86
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 291
| 0
|
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
self.assertEqual(concat_inputs.input_ids.shape ,(2, 10))
self.assertEqual(concat_inputs.attention_mask.shape ,(2, 10))
self.assertEqual(concat_inputs.token_type_ids.shape ,(2, 10))
self.assertEqual(concat_inputs.special_tokens_mask.shape ,(2, 10))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) ,["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] ,)
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) ,["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] ,)
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 455
|
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 455
| 1
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
# Class names below follow the upstream accelerate test utilities this file mirrors.
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator, batch_size=16):  # name reconstructed from the upstream accelerate test scripts
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
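

# Usage sketch (illustrative; the factory name above is reconstructed from the upstream
# accelerate test scripts): given an `Accelerator` instance,
#     train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)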
| 310
|
'''simple docstring'''
def is_balanced(s):
    """Return True if every bracket in `s` is matched and properly nested."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
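

# Quick sanity checks (illustrative addition; these run at import time):
assert is_balanced("{[()]}")
assert not is_balanced("{[(])}")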
def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
| 310
| 1
|
def snake_case__(num: int) -> int:
    """Return the largest number that can be formed by deleting exactly one digit of `num`."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__("doctest").testmod()
| 23
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
def A__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def A__ ( self ):
pass
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def A__ ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def A__ ( self ):
pass
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states ) , expected_num_stages )  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states ) ):
                self.assertEqual(
                    hidden_states[i].shape , torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict , config , model_class )

    def test_initialization( self ):
        def _config_zero_init(config ):
            configs_no_init = copy.deepcopy(config )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init , key , 1e-10 )
                if isinstance(getattr(configs_no_init , key , None ) , PretrainedConfig ):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init , key ) )
                    setattr(configs_no_init , key , no_init_subconfig )
            return configs_no_init

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self ):
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head( self ):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 23
| 1
|
from typing import Any
def __lowerCamelCase ( input_list: list ) -> list[Any]:
    """simple docstring"""
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
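# Illustrative usage (an added sketch, not part of the original module):
# __lowerCamelCase([2, 2, 3])    -> [2]
# __lowerCamelCase([1, 1, 2, 2]) -> [1, 2]   # every value tied for the max count is returned, sorted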
if __name__ == "__main__":
import doctest
doctest.testmod()
| 496
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList ( HashTable ):
    """simple docstring"""

    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )

    def _set_value( self , key , data ):
        """simple docstring"""
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]

    def balanced_factor( self ):
        """simple docstring"""
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution( self , key , data=None ):
        """simple docstring"""
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
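# Hedged usage sketch (illustrative; `insert_data` is assumed to come from the
# sibling HashTable base class and is not defined in this file):
# table = HashTableWithLinkedList(3)
# table.insert_data(17)  # colliding values chain into the slot's deque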
| 283
| 0
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase : Optional[Any] = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
_lowerCAmelCase : Union[str, Any] = {
"t5-small": 5_1_2,
"t5-base": 5_1_2,
"t5-large": 5_1_2,
"t5-3b": 5_1_2,
"t5-11b": 5_1_2,
}
class TaTokenizerFast ( PreTrainedTokenizerFast ):
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE__ = TaTokenizer
SCREAMING_SNAKE_CASE__ = []
    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
        """simple docstring"""
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x : bool('extra_id_' in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens' )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        """simple docstring"""
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' ,a_ ,)
return max_model_length
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )

        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(f'Copy vocab file to {out_vocab_file}' )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b

    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]

    def get_sentinel_tokens( self ):
        """simple docstring"""
        return list(
            set(filter(lambda x : bool(re.search(r'<extra_id_\d+>' , x ) ) , self.additional_special_tokens ) ) )

    def get_sentinel_token_ids( self ):
        """simple docstring"""
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
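# A short sketch of how the helpers above compose (illustrative; the ids assume the
# "t5-small" vocabulary, where </s> is token id 1 and prefix_tokens is empty):
# tok = TaTokenizerFast.from_pretrained("t5-small")
# tok.build_inputs_with_special_tokens([31, 7])      # -> [31, 7, 1]   (appends </s>)
# tok.create_token_type_ids_from_sequences([31, 7])  # -> [0, 0, 0]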
| 707
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase : Tuple = logging.getLogger(__name__)
_lowerCAmelCase : Any = "Hello world! cécé herlolip"
_lowerCAmelCase : Union[str, Any] = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints( path , pytorch_dump_folder_path ):
    """simple docstring"""
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir='.' , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder='bert' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    checkpoints = torch.load(path , lambda storage , loc : storage )
    original = AbsSummarizer(checkpoints , torch.device('cpu' ) , config )
    original.eval()

    new_model = BertAbsSummarizer(config , torch.device('cpu' ) )
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info('convert the model' )
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info('Make sure that the models\' outputs are identical' )
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode('This is sample éàalj\'-.' )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids )) )
    encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
    decoder_input_ids = tokenizer.encode('This is sample 3 éàalj\'-.' )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids )) )
    decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
    output_original_generator = original.generator(output_original_model )

    output_converted_model = new_model(
        encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
    output_converted_generator = new_model.generator(output_converted_model )

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('Maximum absolute difference between the models\' outputs: {:.2f}'.format(maximum_absolute_difference ) )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('Maximum absolute difference between the generators\' outputs: {:.2f}'.format(maximum_absolute_difference ) )

    are_identical = torch.allclose(output_converted_model , output_original_model , atol=1E-3 )
    if are_identical:
        logging.info('all weights are equal up to 1e-3' )
    else:
        raise ValueError('the weights are different. The new model is likely different from the original one.' )

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info('saving the model\'s state dictionary' )
    torch.save(
        new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 604
| 0
|
'''simple docstring'''
def remove_duplicates( key: str ) -> str:
    key_no_dups = ''
    for ch in key:
        if ch == ' ' or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map( key: str ) -> dict[str, str]:
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher( message: str , cipher_map: dict[str, str] ):
    return ''.join(cipher_map.get(ch , ch ) for ch in message.upper() )


def decipher( message: str , cipher_map: dict[str, str] ):
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return ''.join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )


def main():
    message = input('Enter message to encode or decode: ' ).strip()
    key = input('Enter keyword: ' ).strip()
    option = input('Encipher or decipher? E/D:' ).strip()[0].lower()
    try:
        func = {'e': encipher, 'd': decipher}[option]
    except KeyError:
        raise KeyError('invalid input option' )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
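# Illustrative round trip (an added sketch; the keyword below is arbitrary):
# cipher_map = create_cipher_map("marvin")
# decipher(encipher("HELLO WORLD", cipher_map), cipher_map)  # -> "HELLO WORLD"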
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 396
|
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution( target: int = 200_0000 ) -> int:
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
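# Why triangle numbers (added note): an a x b grid contains
# C(a + 1, 2) * C(b + 1, 2) = T(a) * T(b) sub-rectangles, where T(n) = n * (n + 1) / 2,
# since a rectangle is fixed by choosing two of the a + 1 vertical and two of the
# b + 1 horizontal grid lines. The quadratic-formula estimate above solves
# T(a) * T(b) ~ target for b given a.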
if __name__ == "__main__":
print(f"{solution() = }")
| 396
| 1
|
from __future__ import annotations
def _snake_case ( number_of_bytes: int , partitions: int ) -> list[str]:
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!" )
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(F"""{start_bytes}-{end_bytes}""" )
    return allocation_list
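# Example (illustrative): _snake_case(100, 4) yields ['1-25', '26-50', '51-75', '76-100'];
# the final partition always runs to number_of_bytes, absorbing any remainder.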
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
import operator
def strand_sort( arr: list , reverse: bool = False , solution: list = None ) -> list:
    '''simple docstring'''
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
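# Illustrative trace (added for clarity): strand_sort([4, 3, 5, 1, 2]) first pulls the
# increasing strand [4, 5] out of the input and merges it into `solution`, then
# recurses on the remainder [3, 1, 2] until the input is exhausted.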
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 455
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a_ : Any = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
a_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
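# Note (added): with the _LazyModule indirection above, importing this package stays
# cheap; SpeechEncoderDecoderModel and its Flax counterpart are only materialized on
# first attribute access.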
| 194
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester ( unittest.TestCase ):
    """simple docstring"""

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )

        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp( self ):
        self.model_tester = FlaxDistilBertModelTester(self )

    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxDistilBertModelIntegrationTest ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def test_inference_no_head( self ):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 426
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool ( PipelineTool ):
    default_checkpoint = '''openai/whisper-base'''
    description = (
        '''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
        '''transcribed text.'''
    )
    name = '''transcriber'''
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ['''audio''']
    outputs = ['''text''']

    def encode( self , audio ):
        return self.pre_processor(audio , return_tensors="pt" ).input_features

    def forward( self , inputs ):
        return self.model.generate(inputs=inputs )

    def decode( self , outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
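# Hedged usage sketch (the `audio` array below is a stand-in, not a bundled sample):
# tool = SpeechToTextTool()
# text = tool(audio)  # PipelineTool.__call__ chains encode -> forward -> decode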
| 468
|
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot( lst ):
    '''simple docstring'''
    return choice(lst )


def kth_number( lst: list[int] , k: int ) -> int:
    '''simple docstring'''
    pivot = random_pivot(lst )

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small ) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small ) < k - 1:
        return kth_number(big , k - len(small ) - 1 )
    # pivot is in elements smaller than k
    else:
        return kth_number(small , k )
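# Quickselect note (added): each call recurses into only one side of the partition,
# so the expected running time is O(n); e.g. kth_number([3, 2, 1, 4, 5], 3) == 3.
# The strict </> partition above drops duplicates of the pivot, so the input is
# assumed to contain distinct values.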
if __name__ == "__main__":
import doctest
doctest.testmod()
| 468
| 1
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester ( ConfigTester ):
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """hidden_sizes""" ) )
        self.parent.assertTrue(hasattr(config , """num_attention_heads""" ) )
        self.parent.assertTrue(hasattr(config , """num_encoder_blocks""" ) )
class SegformerModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[16, 32, 64, 128] , downsampling_rates=[1, 4, 8, 16] , num_attention_heads=[1, 2, 4, 8] , is_training=True , use_labels=True , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )

        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        return SegformerConfig(
            image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )

    def create_and_check_model( self , config , pixel_values , labels ):
        model = SegformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )

    def create_and_check_for_image_segmentation( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )

    def create_and_check_for_binary_image_segmentation( self , config , pixel_values , labels ):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        labels = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(torch_device )
        result = model(pixel_values , labels=labels )
        self.parent.assertGreater(result.loss , 0.0 )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": SegformerModel,
            """image-classification""": SegformerForImageClassification,
            """image-segmentation""": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp( self ):
        self.model_tester = SegformerModelTester(self )
        self.config_tester = SegformerConfigTester(self , config_class=SegformerConfig )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_binary_image_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs )

    def test_for_image_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs )

    @unittest.skip("""SegFormer does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
    def test_model_common_attributes( self ):
        pass

    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_attention_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths )
            self.assertEqual(len(attentions ) , expected_num_attentions )

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , expected_num_attentions )

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )

            out_len = len(outputs )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            self.assertEqual(out_len + 1 , len(outputs ) )

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ) , expected_num_attentions )

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states ) , expected_num_layers )

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict , config , model_class )
    def test_training( self ):
        if not self.model_tester.is_training:
            return

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ):
                continue

            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase_ ( self : List[Any] ):
pass
@slow
    def test_model_from_pretrained( self ):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
class SegformerModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_image_segmentation_ade( self ):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
            torch_device )

        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )

        with torch.no_grad():
            outputs = model(pixel_values )

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_inference_image_segmentation_city( self ):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained(
            """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(torch_device )

        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )

        with torch.no_grad():
            outputs = model(pixel_values )

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , expected_slice , atol=1e-1 ) )

    @slow
    def test_post_processing_semantic_segmentation( self ):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
            torch_device )

        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )

        with torch.no_grad():
            outputs = model(pixel_values )

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(500, 300)] )
        expected_shape = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , expected_shape )

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , expected_shape )
| 17
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor ( ProcessorMixin ):
    feature_extractor_class = '''EncodecFeatureExtractor'''
    tokenizer_class = ('''T5Tokenizer''', '''T5TokenizerFast''')
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )

        audio = kwargs.pop("""audio""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )

        if text is not None:
            inputs = self.tokenizer(text , **kwargs )
        if audio is not None:
            audio_inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["""input_values"""] = audio_inputs["""input_values"""]
            if "padding_mask" in audio_inputs:
                inputs["""padding_mask"""] = audio_inputs["""padding_mask"""]
            return inputs
    def batch_decode( self , *args , **kwargs ):
        audio_values = kwargs.pop("""audio""" , None )
        padding_mask = kwargs.pop("""padding_mask""" , None )
        if len(args ) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values , padding_mask=padding_mask )
        else:
            return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    def _decode_audio( self , audio_values , padding_mask = None ):
        audio_values = to_numpy(audio_values )
        bsz , channels , seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values )

        padding_mask = to_numpy(padding_mask )

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask , ((0, 0), (0, difference)) , """constant""" , constant_values=padding_value )

        audio_values = audio_values.tolist()
        for i in range(bsz ):
            sliced_audio = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels , -1 )

        return audio_values
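    # Shape sketch (added for clarity): audio_values is (bsz, channels, seq_len); a shorter
    # padding_mask is right-padded with the *non*-padding value so freshly generated samples
    # survive the slice, then each entry is cut to its true length and reshaped to (channels, -1).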
| 17
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case__ = logging.getLogger(__name__)
def simple_accuracy( preds , labels ):
    """simple docstring"""
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None ,metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)


@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys())})
    data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'})
    max_seq_length: int = field(
        default=128 ,metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } ,)
    overwrite_cache: bool = field(
        default=False ,metadata={'help': 'Overwrite the cached training and evaluation sets'})
def main():
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , a )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir , "eval_results.txt" )
        if trainer.is_world_master():
            with open(output_eval_file , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key, value in result.items():
                    logger.info("  %s = %s" , key , value )
                    writer.write("%s = %s\n" % (key, value) )

                results.update(result )

    return results
def _mp_fn( index ):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 373
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class SpeechaTextaConfig ( PretrainedConfig ):
    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__( self , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 373
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
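    # Usage sketch (illustrative, not part of the original test): the dict
    # returned above is splatted straight into the pipeline constructor,
    #   sd_pipe = StableDiffusionXLImg2ImgPipeline(**self.get_dummy_components())
    # which is how the tests below build their pipelines.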
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_prompt_embeds_only(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 25
|
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
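# Usage sketch (illustrative): both searches return a list of (y, x) tuples
# from start to goal, falling back to [start] when no path exists, e.g.
#   path = BidirectionalBreadthFirstSearch((0, 0), (6, 6)).search()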
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
| 303
| 0
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
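# Illustrative check (not in the original file): any odd cycle breaks
# bipartiteness, so a triangle is rejected.
#   triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
#   check_bipartite_dfs(triangle)  # -> False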
| 526
|
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    return vector * sigmoid(1.702 * vector)
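# Quick numeric check (illustrative): the 1.702 factor makes x * sigmoid(1.702 * x)
# a close approximation of the exact GELU 0.5 * x * (1 + erf(x / sqrt(2))), e.g.
#   gaussian_error_linear_unit(np.array([-1.0, 0.0, 1.0]))
#   # -> approximately [-0.1542, 0.0, 0.8458]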
if __name__ == "__main__":
import doctest
doctest.testmod()
| 526
| 1
|
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
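# Usage sketch (hypothetical values): both mappings are keyed by URL/file name
# and hold {"num_bytes": ..., "checksum": ...} records.
#   expected = {"train.jsonl": {"num_bytes": 10, "checksum": "abc"}}
#   verify_checksums(expected, expected)  # passes silently and logs success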
class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
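# Usage sketch (hypothetical path):
#   get_size_checksum_dict("data/train.jsonl")
#   # -> {"num_bytes": <os.path.getsize(...)>, "checksum": "<sha256 hex digest>"}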
def is_small_dataset(dataset_size):
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 32
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)

        choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id,
        )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 45
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_euler')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_euler')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_dpmpp_2m')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type='np', use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 701
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = 'segformer'

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.', FutureWarning, )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
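# Instantiation sketch (illustrative, MiT-b0-like defaults):
#   config = SegformerConfig(depths=[2, 2, 2, 2], hidden_sizes=[32, 64, 160, 256])
#   config.decoder_hidden_size  # -> 256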
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 6
| 0
|
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
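# Usage sketch (illustrative): bitonic sort assumes len(array) is a power of two.
#   data = [12, 42, -21, 1]
#   bitonic_sort(data, 0, len(data), 1)   # ascending  -> [-21, 1, 12, 42]
#   bitonic_merge(data, 0, len(data), 0)  # descending merge of the sorted run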
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 464
|
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    if (len(data) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.')
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('0123456789ABCDEF'):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.')
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
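# Illustrative round trip:
#   base16_encode(b"Hello")      # -> "48656C6C6F"
#   base16_decode("48656C6C6F")  # -> b"Hello"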
if __name__ == "__main__":
import doctest
doctest.testmod()
| 464
| 1
|
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / 'datasets' / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f'{script_name}.py'
    with open(script_path, 'w') as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
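# Usage sketch (illustrative test, not part of the original conftest): pytest
# injects the fixtures above by name, so a test can load the on-disk script.
#   def test_dummy_dataset(dataset_loading_script_dir):
#       import datasets
#       builder = datasets.load_dataset_builder(dataset_loading_script_dir)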
| 641
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append('timed out')

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
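# Usage sketch (hypothetical program string):
#   res = check_correctness("assert 1 + 1 == 2", timeout=3.0, task_id="demo/0", completion_id=0)
#   res["passed"]  # -> True when the program runs to completion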
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append('passed')
        except TimeoutException:
            result.append('timed out')
        except BaseException as e:
            result.append(f'failed: {e}')

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException('Timed out!')

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
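# Usage sketch (illustrative): the context manager arms a SIGALRM-backed timer,
# so any body that runs longer than `seconds` raises TimeoutException.
#   with time_limit(1.0):
#       run_untrusted_snippet()  # hypothetical helper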
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = 'stdin'
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = '1'

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
| 641
| 1
|
'''simple docstring'''
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])

    return new_value[::-1]
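# Illustrative conversions:
#   decimal_to_any(255, 16)  # -> "FF"
#   decimal_to_any(9, 2)     # -> "1001"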
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 440
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ], )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ], )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2, )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=50, )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ], )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ], )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2, )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2, )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", max_seq_len=50, )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ], )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2, )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ], )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 440
| 1
|
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
snake_case__ : int = """CompVis/stable-diffusion-v1-1"""
snake_case__ : Dict = """CompVis/stable-diffusion-v1-2"""
snake_case__ : Dict = """CompVis/stable-diffusion-v1-3"""
snake_case__ : Optional[Any] = """CompVis/stable-diffusion-v1-4"""
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : Tuple , lowerCamelCase : AutoencoderKL , lowerCamelCase : CLIPTextModel , lowerCamelCase : CLIPTokenizer , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase : StableDiffusionSafetyChecker , lowerCamelCase : CLIPImageProcessor , lowerCamelCase : bool = True , ):
'''simple docstring'''
super()._init_()
__lowercase = StableDiffusionPipeline.from_pretrained(lowerCamelCase )
__lowercase = StableDiffusionPipeline.from_pretrained(lowerCamelCase )
__lowercase = StableDiffusionPipeline.from_pretrained(lowerCamelCase )
__lowercase = StableDiffusionPipeline(
vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , requires_safety_checker=lowerCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return {k: getattr(self , lowerCamelCase ) for k in self.config.keys() if not k.startswith("_" )}
def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def _snake_case ( self : Tuple ):
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase )
    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        '''simple docstring'''
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        '''simple docstring'''
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        '''simple docstring'''
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        '''simple docstring'''
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
    @torch.no_grad()
    def _compare(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        '''simple docstring'''
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
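# A hypothetical driver for the comparison pipeline above: reuse the v1-4
# components to build the wrapper, then render one prompt with all four
# checkpoints. Prompt and output file names are illustrative.
#
#   base = StableDiffusionPipeline.from_pretrained(pipe4_model_id)
#   comparer = StableDiffusionComparisonPipeline(
#       vae=base.vae, text_encoder=base.text_encoder, tokenizer=base.tokenizer,
#       unet=base.unet, scheduler=base.scheduler, safety_checker=base.safety_checker,
#       feature_extractor=base.feature_extractor,
#   )
#   output = comparer._compare("an astronaut riding a horse", num_inference_steps=25)
#   for i, images in enumerate(output.images):
#       images[0].save(f"sd_v1_{i + 1}.png")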
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ : Any = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
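# A quick sanity check of the defaults above; the values follow directly from
# the formulas in __init__:
#
#   config = MaskFormerSwinConfig()
#   config.hidden_size   # 768 == 96 * 2 ** (4 - 1)
#   config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']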
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, optionally rounded to digit_amount digits."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Optional[int] = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = ["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
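# With the lazy module installed above, a plain `from transformers import
# LongformerModel` only triggers the torch-backed submodule import on first
# attribute access; the checkpoint name below is illustrative.
#
#   from transformers import LongformerModel, LongformerTokenizerFast
#   tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
#   model = LongformerModel.from_pretrained("allenai/longformer-base-4096")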
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
    hidden_dropout: Optional[float] = field(
        default=0.1, metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        }, )
    feat_proj_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."}, )
    mask_time_prob: Optional[float] = field(
        default=0.05, metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        }, )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."} )
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train+validation", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_val_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        }, )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class DataCollatorCTCWithPadding:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        """simple docstring"""
        # split inputs and labels since they have to be of different lengths and
        # need different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        labels_batch = self.processor.pad(
            labels=label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt", )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels
        return batch
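# A toy check of the collator above, assuming `processor` is the
# Wav2Vec2Processor built in main() below; feature lengths are illustrative.
#
#   data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
#   batch = data_collator([
#       {"input_values": [0.1] * 800, "labels": [5, 2, 9]},
#       {"input_values": [0.2] * 1200, "labels": [7, 1]},
#   ])
#   batch["labels"]  # padded label positions hold -100 so the CTC loss ignores them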
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """simple docstring"""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def __lowerCAmelCase ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , _UpperCamelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
SCREAMING_SNAKE_CASE = datasets.load_dataset(
'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
SCREAMING_SNAKE_CASE = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
# Create and save tokenizer
SCREAMING_SNAKE_CASE = f"""[{''.join(data_args.chars_to_ignore )}]"""
def remove_special_characters(_UpperCamelCase : Any ):
SCREAMING_SNAKE_CASE = re.sub(_UpperCamelCase , '' , batch['sentence'] ).lower() + ' '
return batch
SCREAMING_SNAKE_CASE = train_dataset.map(_UpperCamelCase , remove_columns=['sentence'] )
SCREAMING_SNAKE_CASE = eval_dataset.map(_UpperCamelCase , remove_columns=['sentence'] )
def extract_all_chars(_UpperCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = ' '.join(batch['text'] )
SCREAMING_SNAKE_CASE = list(set(_UpperCamelCase ) )
return {"vocab": [vocab], "all_text": [all_text]}
SCREAMING_SNAKE_CASE = train_dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , batch_size=-1 , keep_in_memory=_UpperCamelCase , remove_columns=train_dataset.column_names , )
SCREAMING_SNAKE_CASE = train_dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , batch_size=-1 , keep_in_memory=_UpperCamelCase , remove_columns=eval_dataset.column_names , )
SCREAMING_SNAKE_CASE = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
SCREAMING_SNAKE_CASE = {v: k for k, v in enumerate(_UpperCamelCase )}
SCREAMING_SNAKE_CASE = vocab_dict[' ']
del vocab_dict[" "]
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
with open('vocab.json' , 'w' ) as vocab_file:
json.dump(_UpperCamelCase , _UpperCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|", )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer), )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE = min(len(_UpperCamelCase ) , data_args.max_train_samples )
SCREAMING_SNAKE_CASE = train_dataset.select(range(_UpperCamelCase ) )
if data_args.max_val_samples is not None:
SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_val_samples ) )
SCREAMING_SNAKE_CASE = torchaudio.transforms.Resample(4_80_00 , 1_60_00 )
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
def speech_file_to_array_fn(_UpperCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torchaudio.load(batch['path'] )
SCREAMING_SNAKE_CASE = resampler(_UpperCamelCase ).squeeze().numpy()
SCREAMING_SNAKE_CASE = 1_60_00
SCREAMING_SNAKE_CASE = batch['text']
return batch
SCREAMING_SNAKE_CASE = train_dataset.map(
_UpperCamelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
SCREAMING_SNAKE_CASE = eval_dataset.map(
_UpperCamelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(_UpperCamelCase : Union[str, Any] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
SCREAMING_SNAKE_CASE = processor(
audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
batch.update(_UpperCamelCase )
return batch
SCREAMING_SNAKE_CASE = train_dataset.map(
_UpperCamelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_UpperCamelCase , num_proc=data_args.preprocessing_num_workers , )
SCREAMING_SNAKE_CASE = eval_dataset.map(
_UpperCamelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_UpperCamelCase , num_proc=data_args.preprocessing_num_workers , )
# Metric
SCREAMING_SNAKE_CASE = datasets.load_metric('wer' )
def compute_metrics(_UpperCamelCase : Dict ):
SCREAMING_SNAKE_CASE = pred.predictions
SCREAMING_SNAKE_CASE = np.argmax(_UpperCamelCase , axis=-1 )
SCREAMING_SNAKE_CASE = processor.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE = processor.batch_decode(_UpperCamelCase )
# we do not want to group tokens when computing the metrics
SCREAMING_SNAKE_CASE = processor.batch_decode(pred.label_ids , group_tokens=_UpperCamelCase )
SCREAMING_SNAKE_CASE = wer_metric.compute(predictions=_UpperCamelCase , references=_UpperCamelCase )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
SCREAMING_SNAKE_CASE = DataCollatorCTCWithPadding(processor=_UpperCamelCase , padding=_UpperCamelCase )
# Initialize our Trainer
SCREAMING_SNAKE_CASE = CTCTrainer(
model=_UpperCamelCase , data_collator=_UpperCamelCase , args=_UpperCamelCase , compute_metrics=_UpperCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
SCREAMING_SNAKE_CASE = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
SCREAMING_SNAKE_CASE = model_args.model_name_or_path
else:
SCREAMING_SNAKE_CASE = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
SCREAMING_SNAKE_CASE = train_result.metrics
SCREAMING_SNAKE_CASE = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCamelCase )
)
SCREAMING_SNAKE_CASE = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics('train' , _UpperCamelCase )
trainer.save_metrics('train' , _UpperCamelCase )
trainer.save_state()
# Evaluation
SCREAMING_SNAKE_CASE = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
SCREAMING_SNAKE_CASE = trainer.evaluate()
SCREAMING_SNAKE_CASE = data_args.max_val_samples if data_args.max_val_samples is not None else len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics('eval' , _UpperCamelCase )
trainer.save_metrics('eval' , _UpperCamelCase )
return results
if __name__ == "__main__":
main()
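# An illustrative invocation of the fine-tuning script above (all flag values
# are assumptions, not tuned settings):
#
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-xlsr-tr \
#       --num_train_epochs 5 \
#       --per_device_train_batch_size 16 \
#       --fp16 \
#       --do_train --do_eval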
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """simple docstring"""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """simple docstring"""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"""This example is {label}""" for label in labels], return_tensors="pt", padding="max_length", )

    def decode(self, outputs):
        """simple docstring"""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
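# A hypothetical usage sketch of the tool above; the label set and input
# sentence are illustrative.
#
#   classifier = TextClassificationTool()
#   classifier("This is a super nice API!", labels=["positive", "negative"])
#   # -> "positive"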
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase__ : Optional[Any] = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
UpperCamelCase__ : Optional[Any] = {
"gpt-neox-20b": 2_048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
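# A short usage sketch for the fast tokenizer above; the checkpoint name comes
# from the pretrained map at the top of the file.
#
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tokenizer("Hello world").input_ids
#   tokenizer.decode(ids)  # 'Hello world'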
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def UpperCamelCase ( self ) -> List[str]:
pass
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , nn.Linear ) )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A__ )
_SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A__ )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*A__ )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = True
if model_class in get_values(A__ ):
continue
_SCREAMING_SNAKE_CASE = model_class(A__ )
model.to(A__ )
model.train()
_SCREAMING_SNAKE_CASE = self._prepare_for_class(A__ , A__ , return_labels=A__ )
_SCREAMING_SNAKE_CASE = model(**A__ ).loss
loss.backward()
def UpperCamelCase ( self ) -> Union[str, Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = True
if model_class in get_values(A__ ) or not model_class.supports_gradient_checkpointing:
continue
_SCREAMING_SNAKE_CASE = model_class(A__ )
model.to(A__ )
model.gradient_checkpointing_enable()
model.train()
_SCREAMING_SNAKE_CASE = self._prepare_for_class(A__ , A__ , return_labels=A__ )
_SCREAMING_SNAKE_CASE = model(**A__ ).loss
loss.backward()
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = _config_zero_init(A__ )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(config=A__ )
# Skip the check for the backbone
_SCREAMING_SNAKE_CASE = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_SCREAMING_SNAKE_CASE = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self ) -> Any:
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class _a (unittest.TestCase):
"""simple docstring"""
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1E-4))
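    # A hedged sketch of turning the raw depth map above into an image; the
    # interpolation size and normalization are illustrative choices.
    #
    #   prediction = torch.nn.functional.interpolate(
    #       predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False,
    #   )
    #   depth = (prediction / prediction.max() * 255).squeeze().cpu().numpy().astype("uint8")
    #   Image.fromarray(depth).save("depth.png")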
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        """simple docstring"""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2_048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        """simple docstring"""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
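# A hedged usage sketch of the processor above; the checkpoint, image, and
# question are illustrative.
#
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-docvqa-base")
#   inputs = processor(images=image, text="What is the total?", return_tensors="pt")
#   # -> BatchEncoding with `flattened_patches` and `attention_mask`; for VQA
#   #    checkpoints the question is rendered into the patches as header text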
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"""{key}: {value}""")
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
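# To run this script, export a personal access token first (the token value
# and file name below are placeholders):
#
#   USER_TOKEN=ghp_your_token_here python fetch_github_info.py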
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : Optional[Any] = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "distilbert"

    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__(self, vocab_size=30_522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation="gelu", initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
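# A minimal sketch of the two classes above working together; the `task`
# value is illustrative.
#
#   config = DistilBertConfig()
#   onnx_config = DistilBertOnnxConfig(config, task="sequence-classification")
#   list(onnx_config.inputs)  # ['input_ids', 'attention_mask']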
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """simple docstring"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
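# e.g. floats_list((2, 3)) returns a 2x3 nested list with values in [0, scale),
# drawn from the module-level `global_rng` so the tests stay reproducible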
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2_000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4_000, return_attention_mask=False, do_normalize=True, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        """simple docstring"""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """simple docstring"""
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    '''simple docstring'''

    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        '''simple docstring'''
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
def __lowerCamelCase ( self : int ) -> int:
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase = feat_extract_first.save_pretrained(UpperCAmelCase_ )[0]
check_json_file_has_correct_format(UpperCAmelCase_ )
_lowerCAmelCase = self.feature_extraction_class.from_pretrained(UpperCAmelCase_ )
_lowerCAmelCase = feat_extract_first.to_dict()
_lowerCAmelCase = feat_extract_second.to_dict()
_lowerCAmelCase = feat_extract_first.mel_filters
_lowerCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowerCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase = os.path.join(UpperCAmelCase_ , 'feat_extract.json' )
feat_extract_first.to_json_file(UpperCAmelCase_ )
_lowerCAmelCase = self.feature_extraction_class.from_json_file(UpperCAmelCase_ )
_lowerCAmelCase = feat_extract_first.to_dict()
_lowerCAmelCase = feat_extract_second.to_dict()
_lowerCAmelCase = feat_extract_first.mel_filters
_lowerCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowerCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_lowerCAmelCase = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs]
# Test feature size
_lowerCAmelCase = feature_extractor(UpperCAmelCase_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowerCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
_lowerCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
# Test batched
_lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='np' ).input_features
_lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_lowerCAmelCase = np.asarray(UpperCAmelCase_ )
_lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='np' ).input_features
_lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
# Test truncation required
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_lowerCAmelCase = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs]
_lowerCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCAmelCase = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs_truncated]
_lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='np' ).input_features
_lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
def __lowerCamelCase ( self : int ) -> str:
"""simple docstring"""
import torch
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _lowerCAmelCase = np.random.rand(100 , 32 ).astype(np.float64 )
        _lowerCAmelCase = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            _lowerCAmelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            _lowerCAmelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def __lowerCamelCase ( self : List[str] , UpperCAmelCase_ : Any ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
_lowerCAmelCase = ds.sort('id' ).select(range(UpperCAmelCase_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __lowerCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
        # fmt: off
        _lowerCAmelCase = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_lowerCAmelCase = self._load_datasamples(1 )
_lowerCAmelCase = WhisperFeatureExtractor()
_lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , UpperCAmelCase_ , atol=1E-4 ) )
def __lowerCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = self._load_datasamples(1 )[0]
_lowerCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
_lowerCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=UpperCAmelCase_ )[0]
self.assertTrue(np.all(np.mean(UpperCAmelCase_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase_ ) - 1 ) < 1E-3 ) )
| 580
| 0
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def __lowerCAmelCase( __UpperCAmelCase="ro" ,__UpperCAmelCase="en" ,__UpperCAmelCase="wmt16" ,__UpperCAmelCase=None ):
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('run pip install datasets' )
_lowercase : Optional[int] = F'''{src_lang}-{tgt_lang}'''
print(F'''Converting {dataset}-{pair}''' )
_lowercase : str = datasets.load_dataset(__UpperCAmelCase ,__UpperCAmelCase )
if save_dir is None:
_lowercase : List[str] = F'''{dataset}-{pair}'''
_lowercase : List[str] = Path(__UpperCAmelCase )
save_dir.mkdir(exist_ok=__UpperCAmelCase )
for split in ds.keys():
print(F'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
_lowercase : int = 'val' if split == 'validation' else split
_lowercase : Union[str, Any] = save_dir.joinpath(F'''{fn}.source''' )
_lowercase : Optional[Any] = save_dir.joinpath(F'''{fn}.target''' )
_lowercase : str = src_path.open('w+' )
_lowercase : List[Any] = tgt_path.open('w+' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
_lowercase : Union[str, Any] = x['translation']
src_fp.write(ex[src_lang] + '\n' )
tgt_fp.write(ex[tgt_lang] + '\n' )
print(F'''Saved {dataset} dataset to {save_dir}''' )
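# Example invocation (hypothetical file name; the flags are generated by the
# fire CLI below):
#     python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en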
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 283
|
"""simple docstring"""
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
stooge(__UpperCAmelCase ,0 ,len(__UpperCAmelCase ) - 1 )
return arr
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ):
"""simple docstring"""
if i >= h:
return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
_lowercase , _lowercase : List[str] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
_lowercase : Optional[int] = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(__UpperCAmelCase ,__UpperCAmelCase ,(h - t) )
# Recursively sort last 2/3 elements
stooge(__UpperCAmelCase ,i + t ,(__UpperCAmelCase) )
# Recursively sort first 2/3 elements
stooge(__UpperCAmelCase ,__UpperCAmelCase ,(h - t) )
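# Illustrative walk-through (not part of the original file): for [2, 4, 5, 3, 1]
# the first comparison swaps 2 and 1, then the three recursive calls sort the
# first two thirds, the last two thirds, and the first two thirds again,
# yielding [1, 2, 3, 4, 5]. The recurrence T(n) = 3 * T(2n/3) + O(1) gives a
# runtime of O(n^(log 3 / log 1.5)) ~= O(n^2.71).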
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = input('Enter numbers separated by a comma:\n').strip()
SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(',')]
print(stooge_sort(unsorted))
| 283
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
lowerCamelCase__ = 1024
lowerCamelCase__ = 4096
lowerCamelCase__ = 24
lowerCamelCase__ = 16
lowerCamelCase__ = [5, 11, 17, 23]
lowerCamelCase__ = [256, 512, 1024, 1024]
lowerCamelCase__ = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = [256, 512, 768, 768]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = (1, 384, 384)
lowerCamelCase__ = False
lowerCamelCase__ = """project"""
if "ade" in checkpoint_url:
lowerCamelCase__ = True
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = """huggingface/label-files"""
lowerCamelCase__ = """ade20k-id2label.json"""
        lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
        lowerCamelCase__ = {int(k ): v for k, v in idalabel.items()}
lowerCamelCase__ = idalabel
lowerCamelCase__ = {v: k for k, v in idalabel.items()}
lowerCamelCase__ = [1, 150, 480, 480]
return config, expected_shape
def A__ ( __lowerCAmelCase : Optional[int] ):
lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
        state_dict.pop(k , None )
def A__ ( __lowerCAmelCase : List[Any] ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
lowerCamelCase__ = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
lowerCamelCase__ = name.replace("""proj""" , """projection""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
lowerCamelCase__ = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
lowerCamelCase__ = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowerCamelCase__ = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
lowerCamelCase__ = name.replace("""..""" , """.""" )
if "stem.conv" in name:
lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase__ = in_proj_bias[: config.hidden_size]
lowerCamelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ = in_proj_bias[-config.hidden_size :]
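# For example, with hidden_size = 768 the fused qkv weight popped above has
# shape (3 * 768, 768) and is sliced row-wise into equal query, key and value
# blocks (and likewise for the bias).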
def A__ ( ):
lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ):
lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(__lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
lowerCamelCase__ = val
# read in qkv matrices
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# Check outputs on an image
lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384
lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" )
# forward pass
lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth
if show_prediction:
lowerCamelCase__ = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
UpperCamelCase : List[str] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 50
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase : Any = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50
| 1
|
"""simple docstring"""
lowerCamelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def a__ ( lowerCAmelCase__ ):
# Make sure the supplied data is a bytes-like object
    if not isinstance(lowerCAmelCase__ , bytes ):
        UpperCAmelCase_ = f"""a bytes-like object is required, not '{data.__class__.__name__}'"""
        raise TypeError(lowerCAmelCase__ )
    UpperCAmelCase_ = "".join(bin(byte )[2:].zfill(8 ) for byte in data )
    UpperCAmelCase_ = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        UpperCAmelCase_ = b"=" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
else:
UpperCAmelCase_ = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowerCAmelCase__ ) , 6 ) ).encode()
+ padding
)
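# Expected behaviour of the encoder above (originally `base64_encode`); the
# values below follow from the standard alphabet and are illustrative:
#     encode(b"Hello World!")  # b'SGVsbG8gV29ybGQh'
#     encode(b"A")             # b'QQ==' (one byte leaves 4 spare bits -> two '=' pads)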
def a__ ( lowerCAmelCase__ ):
# Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(lowerCAmelCase__ , bytes ) and not isinstance(lowerCAmelCase__ , str ):
UpperCAmelCase_ = (
"argument should be a bytes-like object or ASCII string, "
f"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(lowerCAmelCase__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
    if isinstance(lowerCAmelCase__ , bytes ):
try:
UpperCAmelCase_ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
UpperCAmelCase_ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(lowerCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
UpperCAmelCase_ = encoded_data[:-padding]
UpperCAmelCase_ = "".join(
bin(B64_CHARSET.index(lowerCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
UpperCAmelCase_ = "".join(
bin(B64_CHARSET.index(lowerCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )
UpperCAmelCase_ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(lowerCAmelCase__ ) , 8 )
]
return bytes(lowerCAmelCase__ )
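# Round-trip sanity check for the pair above: decoding an encoded payload
# should return the original bytes, e.g. decode("QQ==") == b"A".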
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14
|
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowerCamelCase = 50_003
lowerCamelCase = 50_002
@require_sentencepiece
@require_tokenizers
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PLBartTokenizer
UpperCamelCase = None
UpperCamelCase = False
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="base" , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="base" , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCAmelCase_ = tokenizer.vocab_size
        UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
self.assertListEqual(_UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "<mask>"] )
UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase_ = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="multi" , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCAmelCase_ = tokenizer.vocab_size
        UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
self.assertListEqual(
_UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase_ = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = '''uclanlp/plbart-python-en_XX'''
UpperCamelCase = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
UpperCamelCase = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
UpperCamelCase = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
@classmethod
def lowercase__ ( cls : int ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
UpperCAmelCase_ = 1
return cls
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 50003 )
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
UpperCAmelCase_ = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
UpperCAmelCase_ = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , _UpperCAmelCase )
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [50004, 50001] )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = PLBartTokenizer.from_pretrained(_UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCAmelCase )
@require_torch
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , return_tensors="pt" )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _UpperCAmelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
UpperCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=3 , return_tensors="pt" )
UpperCAmelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10 , return_tensors="pt" )
UpperCAmelCase_ = targets["input_ids"]
UpperCAmelCase_ = shift_tokens_right(_UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 50003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 50001,
} , )
| 14
| 1
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : List[Any] = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = '''gptj'''
lowerCAmelCase_ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
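    # attribute_map lets generic accessors such as config.hidden_size read and
    # write the stored GPT-J fields (n_embd, n_head, ...) transparently.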
def __init__( self : str , __lowercase : List[Any]=5_04_00 , __lowercase : str=20_48 , __lowercase : int=40_96 , __lowercase : Union[str, Any]=28 , __lowercase : str=16 , __lowercase : Dict=64 , __lowercase : List[Any]=None , __lowercase : Tuple="gelu_new" , __lowercase : Dict=0.0 , __lowercase : str=0.0 , __lowercase : Optional[Any]=0.0 , __lowercase : Optional[Any]=1E-5 , __lowercase : Dict=0.02 , __lowercase : Any=True , __lowercase : Optional[Any]=5_02_56 , __lowercase : Tuple=5_02_56 , __lowercase : List[Any]=False , **__lowercase : List[str] , ):
"""simple docstring"""
snake_case_ = vocab_size
snake_case_ = n_positions
snake_case_ = n_embd
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = n_inner
snake_case_ = rotary_dim
snake_case_ = activation_function
snake_case_ = resid_pdrop
snake_case_ = embd_pdrop
snake_case_ = attn_pdrop
snake_case_ = layer_norm_epsilon
snake_case_ = initializer_range
snake_case_ = use_cache
snake_case_ = bos_token_id
snake_case_ = eos_token_id
super().__init__(
bos_token_id=__lowercase , eos_token_id=__lowercase , tie_word_embeddings=__lowercase , **__lowercase )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Tuple , __lowercase : PretrainedConfig , __lowercase : str = "default" , __lowercase : List[PatchingSpec] = None , __lowercase : bool = False , ):
"""simple docstring"""
super().__init__(__lowercase , task=__lowercase , patching_specs=__lowercase , use_past=__lowercase )
if not getattr(self._config , "pad_token_id" , __lowercase ):
# TODO: how to do that better?
snake_case_ = 0
@property
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__lowercase , direction="inputs" )
snake_case_ = {0: "batch", 1: "past_sequence + sequence"}
else:
snake_case_ = {0: "batch", 1: "sequence"}
return common_inputs
@property
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
return self._config.n_layer
@property
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
return self._config.n_head
def snake_case__ ( self : Any , __lowercase : PreTrainedTokenizer , __lowercase : int = -1 , __lowercase : int = -1 , __lowercase : bool = False , __lowercase : Optional[TensorType] = None , ):
"""simple docstring"""
snake_case_ = super(__lowercase , self ).generate_dummy_inputs(
__lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase )
        # We need to order the inputs in the way they appear in the forward()
snake_case_ = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
snake_case_ , snake_case_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
snake_case_ = seqlen + 2
snake_case_ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
snake_case_ = [
(torch.zeros(__lowercase ), torch.zeros(__lowercase )) for _ in range(self.num_layers )
]
snake_case_ = common_inputs["attention_mask"]
if self.use_past:
snake_case_ = ordered_inputs["attention_mask"].dtype
snake_case_ = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__lowercase , __lowercase , dtype=__lowercase )] , dim=1 )
return ordered_inputs
@property
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
return 13
| 376
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def lowerCamelCase__ ( _A ):
'''simple docstring'''
    def is_valid_tree(node ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(_A ):
raise ValueError(
"Each node should be type of TreeNode and data should be float." )
    def is_binary_search_tree_recursive_check(
        node , left_bound , right_bound ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
return is_binary_search_tree_recursive_check(_A , -float("inf" ) , float("inf" ) )
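# Illustrative usage of the checker above (hypothetical trees):
#     TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))   # valid BST -> returns True
#     TreeNode(1.0, TreeNode(2.0))                  # left child too large -> returns False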
if __name__ == "__main__":
import doctest
doctest.testmod()
| 376
| 1
|
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
A_ :str = NewType('''DataClass''', Any)
A_ :List[str] = NewType('''DataClassType''', Any)
def A ( a_ ) -> Dict:
    if isinstance(v ,bool ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
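# e.g. "yes", "TRUE", "t" and "1" all map to True, "no", "f" and "0" to False,
# and anything else raises ArgumentTypeError so argparse can report it cleanly.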
def A ( a_ ) -> Callable[[str], Any]:
    __UpperCamelCase : Optional[Any] ={str(choice ): choice for choice in choices}
return lambda a_ : str_to_choice.get(a_ ,a_ )
def A ( *,
a_ = None ,a_ = None ,a_ = dataclasses.MISSING ,a_ = dataclasses.MISSING ,a_ = None ,**a_ ,) -> dataclasses.Field:
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
__UpperCamelCase : List[Any] ={}
if aliases is not None:
__UpperCamelCase : Optional[int] =aliases
if help is not None:
__UpperCamelCase : int =help
return dataclasses.field(metadata=a_ ,default=a_ ,default_factory=a_ ,**a_ )
class __A ( a ):
"""simple docstring"""
UpperCamelCase__ : Iterable[DataClassType]
def __init__( self , lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
if "formatter_class" not in kwargs:
__UpperCamelCase : str =ArgumentDefaultsHelpFormatter
super().__init__(**lowerCamelCase__ )
if dataclasses.is_dataclass(lowerCamelCase__ ):
__UpperCamelCase : Tuple =[dataclass_types]
__UpperCamelCase : Any =list(lowerCamelCase__ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(lowerCamelCase__ )
@staticmethod
def __lowercase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =f'--{field.name}'
__UpperCamelCase : List[Any] =field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , lowerCamelCase__ ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
__UpperCamelCase : List[str] =kwargs.pop('aliases' , [] )
        if isinstance(aliases , str ):
__UpperCamelCase : int =[aliases]
__UpperCamelCase : Union[str, Any] =getattr(field.type , '__origin__' , field.type )
        if origin_type is Union or (hasattr(types , 'UnionType' ) and isinstance(field.type , types.UnionType )):
if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
f' Problem encountered in field \'{field.name}\'.' )
            if type(None ) not in field.type.__args__:
# filter `str` in Union
__UpperCamelCase : Optional[int] =field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
__UpperCamelCase : Optional[int] =getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
__UpperCamelCase : str =(
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
)
__UpperCamelCase : Tuple =getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
__UpperCamelCase : List[str] ={}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
if origin_type is Literal:
__UpperCamelCase : Union[str, Any] =field.type.__args__
else:
__UpperCamelCase : int =[x.value for x in field.type]
__UpperCamelCase : str =make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
__UpperCamelCase : List[str] =field.default
else:
__UpperCamelCase : Optional[Any] =True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
__UpperCamelCase : int =copy(lowerCamelCase__ )
# Hack because type=bool in argparse does not behave as we want.
__UpperCamelCase : Union[str, Any] =string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
__UpperCamelCase : Union[str, Any] =False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
__UpperCamelCase : Any =default
# This tells argparse we accept 0 or 1 value after --field_name
__UpperCamelCase : Union[str, Any] ='?'
# This is the value that will get picked if we do --field_name (without value)
__UpperCamelCase : Any =True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
__UpperCamelCase : Any =field.type.__args__[0]
__UpperCamelCase : Optional[int] ='+'
if field.default_factory is not dataclasses.MISSING:
__UpperCamelCase : List[str] =field.default_factory()
elif field.default is dataclasses.MISSING:
__UpperCamelCase : Dict =True
else:
__UpperCamelCase : Any =field.type
if field.default is not dataclasses.MISSING:
__UpperCamelCase : List[str] =field.default
elif field.default_factory is not dataclasses.MISSING:
__UpperCamelCase : Dict =field.default_factory()
else:
__UpperCamelCase : Union[str, Any] =True
parser.add_argument(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
__UpperCamelCase : List[str] =False
parser.add_argument(f'--no_{field.name}' , action='store_false' , dest=field.name , **lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
if hasattr(lowerCamelCase__ , '_argument_group_name' ):
__UpperCamelCase : List[Any] =self.add_argument_group(dtype._argument_group_name )
else:
__UpperCamelCase : Union[str, Any] =self
try:
__UpperCamelCase : Dict[str, type] =get_type_hints(lowerCamelCase__ )
except NameError:
raise RuntimeError(
f'Type resolution failed for {dtype}. Try declaring the class in global scope or '
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                __UpperCamelCase : Tuple ='.'.join(map(str , sys.version_info[:3] ) )
raise RuntimeError(
f'Type resolution failed for {dtype} on Python {python_version}. Try removing '
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions that lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(lowerCamelCase__ ):
if not field.init:
continue
__UpperCamelCase : List[Any] =type_hints[field.name]
self._parse_dataclass_field(lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__=None , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=None , ):
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
__UpperCamelCase : Union[str, Any] =[]
if args_filename:
                args_files.append(Path(args_filename ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
__UpperCamelCase : List[str] =ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
__UpperCamelCase : int =args_file_parser.parse_known_args(args=lowerCamelCase__ )
__UpperCamelCase : Dict =vars(lowerCamelCase__ ).get(args_file_flag.lstrip('-' ) , lowerCamelCase__ )
if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
__UpperCamelCase : Optional[int] =[]
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
__UpperCamelCase : Any =file_args + args if args is not None else file_args + sys.argv[1:]
        __UpperCamelCase : Dict =self.parse_known_args(args=args )
__UpperCamelCase : int =[]
for dtype in self.dataclass_types:
            __UpperCamelCase : List[Any] ={f.name for f in dataclasses.fields(dtype ) if f.init}
            __UpperCamelCase : List[Any] ={k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
__UpperCamelCase : Optional[Any] =dtype(**lowerCamelCase__ )
outputs.append(lowerCamelCase__ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(lowerCamelCase__ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
return (*outputs,)
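    # Usage sketch (hypothetical dataclass; the class above corresponds to the
    # original HfArgumentParser):
    #     @dataclass
    #     class TrainArgs:
    #         learning_rate: float = 1e-3
    #     (train_args,) = HfArgumentParser(TrainArgs).parse_args_into_dataclasses()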
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =set(args.keys() )
__UpperCamelCase : str =[]
for dtype in self.dataclass_types:
            __UpperCamelCase : Tuple ={f.name for f in dataclasses.fields(dtype ) if f.init}
__UpperCamelCase : Optional[int] ={k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
__UpperCamelCase : List[str] =dtype(**lowerCamelCase__ )
outputs.append(lowerCamelCase__ )
if not allow_extra_keys and unused_keys:
raise ValueError(f'Some keys are not used by the HfArgumentParser: {sorted(lowerCamelCase__ )}' )
return tuple(lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
with open(Path(lowerCamelCase__ ) , encoding='utf-8' ) as open_json_file:
__UpperCamelCase : int =json.loads(open_json_file.read() )
__UpperCamelCase : str =self.parse_dict(lowerCamelCase__ , allow_extra_keys=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =self.parse_dict(yaml.safe_load(Path(lowerCamelCase__ ).read_text() ) , allow_extra_keys=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
| 714
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
A_ :Optional[int] = logging.get_logger()
@dataclass
class __A :
"""simple docstring"""
UpperCamelCase__ : nn.Module
UpperCamelCase__ : List[nn.Module] =field(default_factory=a )
UpperCamelCase__ : list =field(default_factory=a )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
        __UpperCamelCase : Dict =len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(lowerCamelCase__ )
def __call__( self , lowerCamelCase__ ):
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCamelCase__ )
[x.remove() for x in self.handles]
return self
@property
def __lowercase ( self ):
"""simple docstring"""
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class __A :
"""simple docstring"""
UpperCamelCase__ : nn.Module
UpperCamelCase__ : nn.Module
UpperCamelCase__ : int =0
UpperCamelCase__ : List =field(default_factory=a )
UpperCamelCase__ : List =field(default_factory=a )
def __call__( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : int =Tracker(self.dest )(lowerCamelCase__ ).parametrized
__UpperCamelCase : int =Tracker(self.src )(lowerCamelCase__ ).parametrized
__UpperCamelCase : Optional[int] =list(filter(lambda lowerCamelCase__ : type(lowerCamelCase__ ) not in self.src_skip , lowerCamelCase__ ) )
__UpperCamelCase : Optional[int] =list(filter(lambda lowerCamelCase__ : type(lowerCamelCase__ ) not in self.dest_skip , lowerCamelCase__ ) )
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise Exception(
f'Numbers of operations are different. Source module has {len(lowerCamelCase__ )} operations while'
f' destination module has {len(lowerCamelCase__ )}.' )
for dest_m, src_m in zip(lowerCamelCase__ , lowerCamelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(f'Transferred from={src_m} to={dest_m}' )
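# The transfer relies on both networks visiting their leaf modules (Conv2d,
# BatchNorm2d, ...) in the same order under the Tracker's forward hooks; the
# random dummy input only drives the trace, so its values do not matter.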
def A ( a_ ,a_ ,a_ ,a_ = True ) -> Dict:
print(F'Converting {name}...' )
with torch.no_grad():
__UpperCamelCase : Tuple =timm.create_model(a_ ,pretrained=a_ ).eval()
__UpperCamelCase : Tuple =ResNetForImageClassification(a_ ).eval()
__UpperCamelCase : Optional[int] =ModuleTransfer(src=a_ ,dest=a_ )
__UpperCamelCase : Optional[int] =torch.randn((1, 3, 224, 224) )
module_transfer(a_ )
assert torch.allclose(from_model(a_ ) ,our_model(a_ ).logits ), "The model logits don't match the original one."
__UpperCamelCase : int =F'resnet{"-".join(name.split("resnet" ) )}'
print(a_ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message='Add model' ,use_temp_dir=a_ ,)
# we can use the convnext one
__UpperCamelCase : Optional[int] =AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message='Add image processor' ,use_temp_dir=a_ ,)
print(F'Pushed {checkpoint_name}' )
def A ( a_ ,a_ = None ,a_ = True ) -> int:
__UpperCamelCase : Any ='imagenet-1k-id2label.json'
__UpperCamelCase : Optional[Any] =1_000
__UpperCamelCase : Optional[int] =(1, num_labels)
__UpperCamelCase : Any ='huggingface/label-files'
__UpperCamelCase : List[Any] =num_labels
__UpperCamelCase : int =json.load(open(hf_hub_download(a_ ,a_ ,repo_type='dataset' ) ,'r' ) )
    __UpperCamelCase : Union[str, Any] ={int(k ): v for k, v in idalabel.items()}
__UpperCamelCase : str =idalabel
__UpperCamelCase : List[str] ={v: k for k, v in idalabel.items()}
    __UpperCamelCase : Union[str, Any] =partial(ResNetConfig ,num_labels=num_labels ,id2label=idalabel ,label2id=labelaid )
__UpperCamelCase : Optional[Any] ={
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1_024, 2_048] ,layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1_024, 2_048] ,layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1_024, 2_048] ,layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1_024, 2_048] ,layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(a_ ,names_to_config[model_name] ,a_ ,a_ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(a_ ,a_ ,a_ ,a_ )
return config, expected_shape
if __name__ == "__main__":
A_ :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
A_ :Optional[int] = parser.parse_args()
A_ :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 154
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Tuple = {
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class __magic_name__ ( snake_case ):
UpperCamelCase_ :Dict = """glpn"""
def __init__( self , _lowercase=3 , _lowercase=4 , _lowercase=[2, 2, 2, 2] , _lowercase=[8, 4, 2, 1] , _lowercase=[32, 64, 160, 256] , _lowercase=[7, 3, 3, 3] , _lowercase=[4, 2, 2, 2] , _lowercase=[1, 2, 5, 8] , _lowercase=[4, 4, 4, 4] , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=0.1 , _lowercase=1e-6 , _lowercase=64 , _lowercase=10 , _lowercase=-1 , **_lowercase , )-> str:
super().__init__(**_lowercase )
UpperCamelCase_ = num_channels
UpperCamelCase_ = num_encoder_blocks
UpperCamelCase_ = depths
UpperCamelCase_ = sr_ratios
UpperCamelCase_ = hidden_sizes
UpperCamelCase_ = patch_sizes
UpperCamelCase_ = strides
UpperCamelCase_ = mlp_ratios
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = initializer_range
UpperCamelCase_ = drop_path_rate
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = decoder_hidden_size
UpperCamelCase_ = max_depth
UpperCamelCase_ = head_in_index
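# Hedged usage sketch, assuming the configuration class above corresponds to
# transformers.GLPNConfig: the defaults describe a four-block hierarchical
# encoder with widths [32, 64, 160, 256].
from transformers import GLPNConfig

_cfg = GLPNConfig(num_encoder_blocks=4, hidden_sizes=[32, 64, 160, 256])
print(_cfg.model_type)  # "glpn"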
| 628
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( snake_case ):
UpperCamelCase_ :Dict = ["""image_processor""", """tokenizer"""]
UpperCamelCase_ :str = """BridgeTowerImageProcessor"""
UpperCamelCase_ :Tuple = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , _lowercase , _lowercase )-> Any:
super().__init__(_lowercase , _lowercase )
def __call__( self , _lowercase , _lowercase = None , _lowercase = True , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = 0 , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = True , _lowercase = None , **_lowercase , )-> BatchEncoding:
UpperCamelCase_ = self.tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_token_type_ids=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
# add pixel_values + pixel_mask
UpperCamelCase_ = self.image_processor(
_lowercase , return_tensors=_lowercase , do_normalize=_lowercase , do_center_crop=_lowercase , **_lowercase )
encoding.update(_lowercase )
return encoding
def UpperCAmelCase_ ( self , *_lowercase , **_lowercase )-> Union[str, Any]:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase_ ( self , *_lowercase , **_lowercase )-> List[Any]:
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
def UpperCAmelCase_ ( self )-> Any:
UpperCamelCase_ = self.tokenizer.model_input_names
UpperCamelCase_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
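# Hedged usage sketch, assuming the processor above corresponds to
# transformers.BridgeTowerProcessor: it tokenizes the text with the Roberta
# tokenizer and preprocesses the image, merging both into one BatchEncoding.
# The checkpoint name below is illustrative.
from PIL import Image
from transformers import BridgeTowerProcessor

_processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
_encoding = _processor(images=Image.new("RGB", (288, 288)), text="a photo", return_tensors="pt")
print(sorted(_encoding.keys()))  # expected: attention_mask, input_ids, pixel_mask, pixel_values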
| 628
| 1
|
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    def __init__( self : Tuple , num_attention_heads : int = 1_6 , attention_head_dim : int = 8_8 , in_channels : Optional[int] = None , num_layers : int = 1 , dropout : float = 0.0 , norm_num_groups : int = 3_2 , cross_attention_dim : Optional[int] = None , attention_bias : bool = False , sample_size : Optional[int] = None , num_vector_embeds : Optional[int] = None , activation_fn : str = "geglu" , num_embeds_ada_norm : Optional[int] = None , ) ->List[Any]:
super().__init__()
lowerCamelCase__ : Tuple = nn.ModuleList(
[
TransformeraDModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCamelCase__ : Tuple = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCamelCase__ : Optional[Any] = [7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCamelCase__ : int = [1, 0]
    def __lowerCamelCase ( self : Tuple , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict : bool = True , ) ->Dict:
lowerCamelCase__ : Tuple = hidden_states
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : Tuple = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCamelCase__ : Union[str, Any] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCamelCase__ : Dict = self.transformer_index_for_condition[i]
            lowerCamelCase__ : Tuple = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCamelCase__ : Optional[int] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCamelCase__ : Optional[int] = output_states + input_states
if not return_dict:
return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
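# A tiny numeric sketch of the residual mix in the forward pass above: each
# transformer contributes (encoded - input), the two deltas are blended with
# mix_ratio, and the original input is added back.
import torch

_inp = torch.zeros(1, 4)
_encoded = [_inp + 1.0, _inp + 3.0]  # hypothetical transformer outputs
_deltas = [e - _inp for e in _encoded]
_out = _deltas[0] * 0.5 + _deltas[1] * (1 - 0.5) + _inp
print(_out)  # tensor([[2., 2., 2., 2.]])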
| 718
|
import math
def _a ( UpperCAmelCase ) -> str:
"""simple docstring"""
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : List[Any] = 0
while num > 0:
lowerCamelCase__ : Tuple = num % 8
        lowerCamelCase__ : List[str] = octal + (remainder * math.floor(math.pow(10 , counter ) ))
counter += 1
lowerCamelCase__ : Optional[Any] = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return f"0o{int(UpperCAmelCase )}"
def _a ( ) -> None:
"""simple docstring"""
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(216 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(512 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
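# Hedged verification sketch: Python's built-in oct() should agree with the
# digit-accumulation approach implemented above.
import math

def _to_octal(num: int) -> str:
    octal, counter = 0, 0
    while num > 0:
        octal += (num % 8) * math.floor(math.pow(10, counter))
        counter += 1
        num = math.floor(num / 8)
    return f"0o{int(octal)}"

for _n in (2, 8, 65, 216, 512):
    assert _to_octal(_n) == oct(_n), _n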
| 130
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {"""vocab_file""": """sentencepiece.bpe.model"""}
SCREAMING_SNAKE_CASE__ : Any = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
SCREAMING_SNAKE_CASE__ : int = {
"""moussaKam/mbarthez""": 10_24,
"""moussaKam/barthez""": 10_24,
"""moussaKam/barthez-orangesum-title""": 10_24,
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = """▁"""
class UpperCAmelCase_ ( __lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ['input_ids', 'attention_mask']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase = None , **_lowerCAmelCase , ):
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ : str = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
UpperCAmelCase__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
UpperCAmelCase__ : int = vocab_file
UpperCAmelCase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCAmelCase ) )
UpperCAmelCase__ : Dict = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
UpperCAmelCase__ : Union[str, Any] = len(self.sp_model ) - 1
UpperCAmelCase__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : int = [self.cls_token_id]
UpperCAmelCase__ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
UpperCAmelCase__ : int = [self.sep_token_id]
UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self ):
return len(self.sp_model )
def __UpperCAmelCase ( self ):
        UpperCAmelCase__ : List[str] = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self , _lowerCAmelCase ):
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase__ : int = self.sp_model.PieceToId(_lowerCAmelCase )
return spm_id if spm_id else self.unk_token_id
def __UpperCAmelCase ( self , _lowerCAmelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase ):
UpperCAmelCase__ : int = []
UpperCAmelCase__ : List[str] = """"""
UpperCAmelCase__ : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Tuple = []
else:
                current_sub_tokens.append(token )
UpperCAmelCase__ : Optional[int] = False
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def __getstate__( self ):
UpperCAmelCase__ : str = self.__dict__.copy()
UpperCAmelCase__ : List[Any] = None
return state
def __setstate__( self , _lowerCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase__ : Tuple = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , """wb""" ) as fi:
UpperCAmelCase__ : str = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
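# A minimal sketch of the detokenization strategy in convert_tokens_to_string
# above: ordinary pieces are buffered and decoded together, while special
# tokens are emitted verbatim and flush the buffer. The decode stand-in below
# only mimics SentencePiece's "▁ marks a word start" convention.
def _join_pieces(tokens, special=("<s>", "</s>")):
    decode = lambda ts: "".join(ts).replace("▁", " ")
    out, buf, prev_special = "", [], False
    for tok in tokens:
        if tok in special:
            if not prev_special:
                out += " "
            out += decode(buf) + tok
            buf, prev_special = [], True
        else:
            buf.append(tok)
            prev_special = False
    return (out + decode(buf)).strip()

print(_join_pieces(["<s>", "▁Bon", "jour", "</s>"]))  # "<s> Bonjour</s>"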
| 79
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__snake_case = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
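# Hedged sketch of the lazy-import pattern realized by _LazyModule above,
# rebuilt with importlib on a toy module type: the heavy import happens on
# first attribute access, not at definition time. Names here are illustrative.
import importlib
import types

class _LazyDemo(types.ModuleType):
    def __getattr__(self, name):
        module = importlib.import_module("json")  # resolved only on first access
        return getattr(module, name)

_lazy = _LazyDemo("lazy_json")
print(_lazy.dumps({"lazy": True}))  # the json import happens here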
| 451
| 0
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__UpperCAmelCase : Optional[Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
__UpperCAmelCase : Optional[Any] = (
subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
)
__UpperCAmelCase : str = "|".join(sys.argv[1:])
__UpperCAmelCase : Tuple = re.compile(rf'''^({joined_dirs}).*?\.py$''')
__UpperCAmelCase : Optional[Any] = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 643
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file:
UpperCamelCase : str = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
UpperCamelCase : Optional[int] = input_file.read()
UpperCamelCase : Union[str, Any] = regexp.search(__SCREAMING_SNAKE_CASE )
return match
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file:
UpperCamelCase : Optional[int] = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
UpperCamelCase : Tuple = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCamelCase : Dict = regexp.finditer(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = Path('''./datasets''' )
UpperCamelCase : Tuple = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__SCREAMING_SNAKE_CASE ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = Path('''./datasets''' )
UpperCamelCase : Tuple = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_print_statements(str(__SCREAMING_SNAKE_CASE ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 643
| 1
|
import os
from datetime import datetime as dt
from github import Github
UpperCamelCase = [
"good first issue",
"feature request",
"wip",
]
def __magic_name__ ( ) -> List[str]:
_lowercase : Union[str, Any] = Github(os.environ['GITHUB_TOKEN'] )
_lowercase : str = g.get_repo('huggingface/accelerate' )
_lowercase : Any = repo.get_issues(state='open' )
for issue in open_issues:
        _lowercase : Dict = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        _lowercase : Any = comments[0] if len(comments ) > 0 else None
_lowercase : Optional[Any] = dt.utcnow()
_lowercase : str = (current_time - issue.updated_at).days
_lowercase : Dict = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
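# Hedged sketch of the staleness thresholds above, with plain datetimes
# standing in for the GitHub API objects.
from datetime import datetime, timedelta

_now = datetime.utcnow()
_updated_at = _now - timedelta(days=8)
_created_at = _now - timedelta(days=40)
print((_now - _updated_at).days > 7 and (_now - _created_at).days >= 30)  # True -> close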
| 66
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase (unittest.TestCase ):
def __UpperCAmelCase ( self )-> Optional[int]:
__lowerCAmelCase = "laion/clap-htsat-unfused"
__lowerCAmelCase = tempfile.mkdtemp()
def __UpperCAmelCase ( self , **__UpperCamelCase )-> Optional[int]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **__UpperCamelCase )
def __UpperCAmelCase ( self , **__UpperCamelCase )-> List[Any]:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__UpperCamelCase )
def __UpperCAmelCase ( self )-> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self )-> Dict:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_feature_extractor()
__lowerCAmelCase = ClapProcessor(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCamelCase )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __UpperCamelCase )
def __UpperCAmelCase ( self )-> List[str]:
__lowerCAmelCase = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__lowerCAmelCase = self.get_feature_extractor(do_normalize=__UpperCamelCase , padding_value=1.0 )
__lowerCAmelCase = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCamelCase )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __UpperCamelCase )
def __UpperCAmelCase ( self )-> List[str]:
__lowerCAmelCase = self.get_feature_extractor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = ClapProcessor(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__lowerCAmelCase = floats_list((3, 1_0_0_0) )
__lowerCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" )
__lowerCAmelCase = processor(audios=__UpperCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self )-> Dict:
__lowerCAmelCase = self.get_feature_extractor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = ClapProcessor(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__lowerCAmelCase = "This is a test string"
__lowerCAmelCase = processor(text=__UpperCamelCase )
__lowerCAmelCase = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self )-> int:
__lowerCAmelCase = self.get_feature_extractor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = ClapProcessor(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase = processor.batch_decode(__UpperCamelCase )
__lowerCAmelCase = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __UpperCAmelCase ( self )-> Dict:
__lowerCAmelCase = self.get_feature_extractor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = ClapProcessor(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
| 367
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
A = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : Optional[int] , *snake_case : List[str] , **snake_case : str ) -> None:
'''simple docstring'''
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' , snake_case , )
super().__init__(*snake_case , **snake_case )
| 717
|
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCamelCase :
"""simple docstring"""
def __init__( self : int , snake_case : Any , snake_case : str=13 , snake_case : List[str]=30 , snake_case : List[Any]=2 , snake_case : List[Any]=3 , snake_case : Dict=True , snake_case : int=True , snake_case : Any=32 , snake_case : Tuple=2 , snake_case : Optional[int]=4 , snake_case : Any=37 , snake_case : List[Any]="gelu" , snake_case : Optional[int]=0.1 , snake_case : Optional[Any]=0.1 , snake_case : Dict=10 , snake_case : Tuple=0.02 , snake_case : Optional[Any]=3 , snake_case : List[str]=0.6 , snake_case : Dict=None , ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : Optional[int] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Tuple = image_size
__magic_name__ : Union[str, Any] = patch_size
__magic_name__ : List[Any] = num_channels
__magic_name__ : Union[str, Any] = is_training
__magic_name__ : List[Any] = use_labels
__magic_name__ : int = hidden_size
__magic_name__ : int = num_hidden_layers
__magic_name__ : Tuple = num_attention_heads
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Optional[int] = hidden_act
__magic_name__ : Optional[Any] = hidden_dropout_prob
__magic_name__ : Optional[int] = attention_probs_dropout_prob
__magic_name__ : Optional[Any] = type_sequence_label_size
__magic_name__ : Optional[int] = initializer_range
__magic_name__ : Optional[Any] = mask_ratio
__magic_name__ : Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__magic_name__ : str = (image_size // patch_size) ** 2
__magic_name__ : int = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Optional[Any] = None
if self.use_labels:
__magic_name__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _UpperCAmelCase ( self : int , snake_case : Optional[int] , snake_case : Any , snake_case : str ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : List[str] = TFViTMAEModel(config=snake_case )
__magic_name__ : Any = model(snake_case , training=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self : Any , snake_case : Dict , snake_case : Tuple , snake_case : Any ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : Optional[Any] = TFViTMAEForPreTraining(snake_case )
__magic_name__ : List[Any] = model(snake_case , training=snake_case )
# expected sequence length = num_patches
__magic_name__ : List[str] = (self.image_size // self.patch_size) ** 2
__magic_name__ : Union[str, Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__magic_name__ : Dict = 1
__magic_name__ : Dict = TFViTMAEForPreTraining(snake_case )
__magic_name__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : str = model(snake_case , training=snake_case )
__magic_name__ : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
__magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
((__magic_name__) , (__magic_name__) , (__magic_name__)) : List[Any] = config_and_inputs
__magic_name__ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
snake_case_ = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def _UpperCAmelCase ( self : Dict ) -> Any:
'''simple docstring'''
__magic_name__ : List[str] = TFViTMAEModelTester(self )
__magic_name__ : Any = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def _UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def _UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
pass
def _UpperCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
__magic_name__ , __magic_name__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Tuple = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__magic_name__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , tf.keras.layers.Layer ) )
def _UpperCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
__magic_name__ , __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] = model_class(snake_case )
__magic_name__ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Any = [*signature.parameters.keys()]
__magic_name__ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case )
def _UpperCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def _UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
np.random.seed(2 )
__magic_name__ , __magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Tuple = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ : Any = model_class(snake_case )
__magic_name__ : int = self._prepare_for_class(snake_case , snake_case )
__magic_name__ : List[str] = model(snake_case , noise=snake_case )
__magic_name__ : List[str] = copy.deepcopy(self._prepare_for_class(snake_case , snake_case ) )
__magic_name__ : Dict = model(**snake_case , noise=snake_case )
__magic_name__ : Tuple = outputs_dict[0].numpy()
__magic_name__ : Any = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def _UpperCAmelCase ( self : str ) -> Any:
'''simple docstring'''
np.random.seed(2 )
__magic_name__ , __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(snake_case : Tuple ):
__magic_name__ : Optional[int] = {}
for k, v in inputs_dict.items():
                if tf.is_tensor(v ):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v )
return inputs_np_dict
for model_class in self.all_model_classes:
__magic_name__ : Union[str, Any] = model_class(snake_case )
__magic_name__ : Any = self._prepare_for_class(snake_case , snake_case )
__magic_name__ : Union[str, Any] = prepare_numpy_arrays(snake_case )
__magic_name__ : Union[str, Any] = model(snake_case , noise=snake_case )
__magic_name__ : List[str] = model(**snake_case , noise=snake_case )
self.assert_outputs_same(snake_case , snake_case )
def _UpperCAmelCase ( self : List[Any] , snake_case : Optional[Any] , snake_case : List[Any] , snake_case : Tuple ) -> Any:
'''simple docstring'''
np.random.seed(2 )
__magic_name__ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
__magic_name__ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__magic_name__ : int = tf.constant(snake_case )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__magic_name__ : str = tf_noise
super().check_pt_tf_models(snake_case , snake_case , snake_case )
def _UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
np.random.seed(2 )
__magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(snake_case )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(snake_case , snake_case ),)
if isinstance(snake_case , snake_case )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(snake_case , '''_keras_serializable''' , snake_case )
}
__magic_name__ : str = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__magic_name__ : int = tf.convert_to_tensor(snake_case )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
__magic_name__ : List[str] = main_layer_class(snake_case )
__magic_name__ : List[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
__magic_name__ : Any = tf.keras.Model(snake_case , outputs=main_layer(snake_case ) )
__magic_name__ : Any = model(snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ : Optional[Any] = os.path.join(snake_case , '''keras_model.h5''' )
model.save(snake_case )
__magic_name__ : Dict = tf.keras.models.load_model(
snake_case , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(snake_case , tf.keras.Model )
__magic_name__ : List[str] = model(snake_case )
self.assert_outputs_same(snake_case , snake_case )
@slow
def _UpperCAmelCase ( self : Any ) -> Optional[Any]:
'''simple docstring'''
np.random.seed(2 )
__magic_name__ , __magic_name__ : str = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Dict = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ : str = model_class(snake_case )
__magic_name__ : List[Any] = self._prepare_for_class(snake_case , snake_case )
__magic_name__ : Union[str, Any] = model(snake_case , noise=snake_case )
if model_class.__name__ == "TFViTMAEModel":
__magic_name__ : Union[str, Any] = outputs.last_hidden_state.numpy()
__magic_name__ : str = 0
else:
__magic_name__ : List[Any] = outputs.logits.numpy()
__magic_name__ : List[str] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case , saved_model=snake_case )
__magic_name__ : Optional[Any] = model_class.from_pretrained(snake_case )
__magic_name__ : List[Any] = model(snake_case , noise=snake_case )
if model_class.__name__ == "TFViTMAEModel":
__magic_name__ : Optional[int] = after_outputs['''last_hidden_state'''].numpy()
__magic_name__ : Tuple = 0
else:
__magic_name__ : Any = after_outputs['''logits'''].numpy()
__magic_name__ : List[Any] = 0
__magic_name__ : List[str] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case , 1e-5 )
def _UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
np.random.seed(2 )
__magic_name__ , __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Dict = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ : Union[str, Any] = model_class(snake_case )
__magic_name__ : Optional[int] = self._prepare_for_class(snake_case , snake_case )
__magic_name__ : Optional[Any] = model(snake_case , noise=snake_case )
__magic_name__ : Dict = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(snake_case )
__magic_name__ : Tuple = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
__magic_name__ : Dict = model_class.from_config(model.config )
__magic_name__ : str = new_model(snake_case ) # Build model
new_model.set_weights(model.get_weights() )
__magic_name__ : Any = new_model(snake_case , noise=snake_case )
self.assert_outputs_same(snake_case , snake_case )
@unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def _UpperCAmelCase ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def _UpperCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
pass
@slow
def _UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__magic_name__ : Dict = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(snake_case )
def UpperCamelCase_ ( ) -> int:
"""simple docstring"""
__magic_name__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
np.random.seed(2 )
__magic_name__ : List[str] = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
__magic_name__ : List[str] = self.default_image_processor
__magic_name__ : Optional[Any] = prepare_img()
__magic_name__ : str = image_processor(images=snake_case , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__magic_name__ : Optional[int] = ViTMAEConfig()
__magic_name__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__magic_name__ : List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
__magic_name__ : Optional[int] = model(**snake_case , noise=snake_case )
# verify the logits
__magic_name__ : Optional[int] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , snake_case )
__magic_name__ : Dict = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , snake_case , atol=1e-4 )
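# Hedged check of the sequence-length formula noted in the model tester above:
# with a 30x30 image, 2x2 patches and mask_ratio 0.6, the visible length is
# (num_patches + 1) * (1 - mask_ratio), rounded up.
import math

_num_patches = (30 // 2) ** 2  # 225
_seq_length = int(math.ceil((1 - 0.6) * (_num_patches + 1)))
print(_num_patches, _seq_length)  # 225 91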
| 147
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__: Union[str, Any] = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Optional[Any] = ["MobileNetV2FeatureExtractor"]
__magic_name__: Dict = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: List[Any] = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
__magic_name__: List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 324
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
__magic_name__: Optional[Any] = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__magic_name__: Dict = "cuda" if torch.cuda.is_available() else "cpu"
def UpperCamelCase ( _A, _A=100, _A=" " ):
"""simple docstring"""
    __magic_name__ : Union[str, Any] = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0, len(text ), n )]
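# Hedged demo of the chunker above, assuming its de-obfuscated signature is
# split_text(text, n=100, character=" "): a five-word string, two words at a time.
# print(split_text("a b c d e", n=2))  # ['a b', 'c d', 'e']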
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ ,__magic_name__ : Dict = [], []
for title, text in zip(documents["""title"""], documents["""text"""] ):
if text is not None:
for passage in split_text(_A ):
titles.append(title if title is not None else """""" )
texts.append(_A )
return {"title": titles, "text": texts}
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
__magic_name__ : Optional[int] = ctx_tokenizer(
documents["""title"""], documents["""text"""], truncation=_A, padding="""longest""", return_tensors="""pt""" )["""input_ids"""]
__magic_name__ : int = ctx_encoder(input_ids.to(device=_A ), return_dict=_A ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def UpperCamelCase ( _A, _A, _A, ):
"""simple docstring"""
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
__magic_name__ : Optional[Any] = load_dataset(
"""csv""", data_files=[rag_example_args.csv_path], split="""train""", delimiter="""\t""", column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
__magic_name__ : Tuple = dataset.map(_A, batched=_A, num_proc=processing_args.num_proc )
# And compute the embeddings
__magic_name__ : Optional[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_A )
__magic_name__ : Optional[int] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
__magic_name__ : Optional[Any] = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
__magic_name__ : List[str] = dataset.map(
partial(_A, ctx_encoder=_A, ctx_tokenizer=_A ), batched=_A, batch_size=processing_args.batch_size, features=_A, )
# And finally save your dataset
__magic_name__ : Union[str, Any] = os.path.join(rag_example_args.output_dir, """my_knowledge_dataset""" )
dataset.save_to_disk(_A )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
__magic_name__ : List[Any] = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""", custom_index=_A )
# And save the index
__magic_name__ : List[Any] = os.path.join(rag_example_args.output_dir, """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(_A )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class snake_case__ :
lowercase__ : str = field(
        default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
lowercase__ : Optional[str] = field(
        default=None , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
lowercase__ : str = field(
default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
lowercase__ : str = field(
default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
'''help''': (
'''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
''' \'facebook/dpr-ctx_encoder-multiset-base\''''
)
} , )
lowercase__ : Optional[str] = field(
        default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class snake_case__ :
lowercase__ : Optional[int] = field(
        default=None , metadata={
'''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
} , )
lowercase__ : int = field(
default=16 , metadata={
'''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
} , )
@dataclass
class snake_case__ :
lowercase__ : int = field(
default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
lowercase__ : int = field(
default=128 , metadata={
'''help''': (
'''The number of bi-directional links created for every new element during the HNSW index construction.'''
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__magic_name__: int = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__magic_name__ , __magic_name__ , __magic_name__: int = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__magic_name__: Union[str, Any] = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 324
| 1
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __magic_name__ (__lowercase ):
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(_a , "num_attention_heads" ) )
self.parent.assertTrue(hasattr(_a , "num_encoder_blocks" ) )
class __magic_name__ :
def __init__( self , _a , _a=13 , _a=64 , _a=3 , _a=4 , _a=[2, 2, 2, 2] , _a=[8, 4, 2, 1] , _a=[16, 32, 64, 128] , _a=[1, 4, 8, 16] , _a=[1, 2, 4, 8] , _a=True , _a=True , _a="gelu" , _a=0.1 , _a=0.1 , _a=0.0_2 , _a=3 , _a=None , ) -> Dict:
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = image_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = num_encoder_blocks
lowerCAmelCase_ = sr_ratios
lowerCAmelCase_ = depths
lowerCAmelCase_ = hidden_sizes
lowerCAmelCase_ = downsampling_rates
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = scope
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase_ = self.get_config()
return config, pixel_values, labels
def __a ( self ) -> int:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __a ( self , _a , _a , _a ) -> str:
lowerCAmelCase_ = SegformerModel(config=_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a )
lowerCAmelCase_ = lowerCAmelCase_ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def __a ( self , _a , _a , _a ) -> str:
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = SegformerForSemanticSegmentation(_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
lowerCAmelCase_ = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def __a ( self , _a , _a , _a ) -> Optional[Any]:
lowerCAmelCase_ = 1
lowerCAmelCase_ = SegformerForSemanticSegmentation(config=_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_a )
lowerCAmelCase_ = model(_a , labels=_a )
self.parent.assertGreater(result.loss , 0.0 )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ (__lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase__ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __a ( self ) -> List[str]:
lowerCAmelCase_ = SegformerModelTester(self )
lowerCAmelCase_ = SegformerConfigTester(self , config_class=_a )
def __a ( self ) -> str:
self.config_tester.run_common_tests()
def __a ( self ) -> Any:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_a )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_a )
@unittest.skip("SegFormer does not use inputs_embeds" )
def __a ( self ) -> List[Any]:
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
def __a ( self ) -> List[Any]:
pass
def __a ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(_a )
lowerCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _a )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = True
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(_a , _a ) )
lowerCAmelCase_ = outputs.attentions
lowerCAmelCase_ = sum(self.model_tester.depths )
self.assertEqual(len(_a ) , _a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(_a , _a ) )
lowerCAmelCase_ = outputs.attentions
self.assertEqual(len(_a ) , _a )
# verify the first attentions (first block, first layer)
lowerCAmelCase_ = (self.model_tester.image_size // 4) ** 2
lowerCAmelCase_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
lowerCAmelCase_ = (self.model_tester.image_size // 32) ** 2
lowerCAmelCase_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
lowerCAmelCase_ = len(_a )
# Check attention is always last and order is fine
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + 1 , len(_a ) )
lowerCAmelCase_ = outputs.attentions
self.assertEqual(len(_a ) , _a )
# verify the first attentions (first block, first layer)
lowerCAmelCase_ = (self.model_tester.image_size // 4) ** 2
lowerCAmelCase_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def __a ( self ) -> Optional[Any]:
def check_hidden_states_output(_a , _a , _a ):
lowerCAmelCase_ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(_a , _a ) )
lowerCAmelCase_ = outputs.hidden_states
lowerCAmelCase_ = self.model_tester.num_encoder_blocks
self.assertEqual(len(_a ) , _a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ = True
check_hidden_states_output(_a , _a , _a )
def __a ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_a ):
continue
lowerCAmelCase_ = model_class(_a )
model.to(_a )
model.train()
lowerCAmelCase_ = self._prepare_for_class(_a , _a , return_labels=_a )
lowerCAmelCase_ = model(**_a ).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __a ( self ) -> List[Any]:
pass
@slow
def __a ( self ) -> str:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = SegformerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class __magic_name__ (unittest.TestCase ):
@slow
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_a , align=_a , do_random_crop=_a )
lowerCAmelCase_ = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
_a )
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=_a , return_tensors="pt" )
lowerCAmelCase_ = encoded_inputs.pixel_values.to(_a )
with torch.no_grad():
lowerCAmelCase_ = model(_a )
lowerCAmelCase_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _a )
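        # Note (added for clarity): SegFormer's decode head predicts at 1/4 of the
        # input resolution, so the 512x512 input yields 128x128 logits.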
lowerCAmelCase_ = torch.tensor(
[
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _a , atol=1E-4 ) )
@slow
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_a , align=_a , do_random_crop=_a )
lowerCAmelCase_ = SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(_a )
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=_a , return_tensors="pt" )
lowerCAmelCase_ = encoded_inputs.pixel_values.to(_a )
with torch.no_grad():
lowerCAmelCase_ = model(_a )
lowerCAmelCase_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _a )
lowerCAmelCase_ = torch.tensor(
[
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _a , atol=1E-1 ) )
@slow
def __a ( self ) -> Dict:
lowerCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_a , align=_a , do_random_crop=_a )
lowerCAmelCase_ = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
_a )
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=_a , return_tensors="pt" )
lowerCAmelCase_ = encoded_inputs.pixel_values.to(_a )
with torch.no_grad():
lowerCAmelCase_ = model(_a )
lowerCAmelCase_ = outputs.logits.detach().cpu()
lowerCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(500, 300)] )
lowerCAmelCase_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _a )
lowerCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_a )
lowerCAmelCase_ = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , _a )
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
):
    """Approximate an ODE solution with the modified Euler (Heun) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # predictor: a plain Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
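
# Minimal usage sketch (added; not part of the original module). The exact
# solution of dy/dx = y with y(0) = 1 is e**x, so for a small step size the
# last entry should be close to e = 2.71828...:
#
#     y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
#     print(y[-1])  # ~2.718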
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    # check the row and the column for an already placed queen
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # check both upward diagonals
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
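# Usage note (added): with n = 8 the solver prints all 92 distinct solutions;
# smaller boards make quick sanity checks (n = 4 has exactly 2 solutions,
# while n = 2 and n = 3 have none).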
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self , __A , __A=7 , __A=3 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=1 / 255 , __A=True , __A=[0.5, 0.5, 0.5] , __A=[0.5, 0.5, 0.5] , __A=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = min_resolution
__UpperCAmelCase = max_resolution
__UpperCAmelCase = do_resize
__UpperCAmelCase = size
__UpperCAmelCase = do_rescale
__UpperCAmelCase = rescale_factor
__UpperCAmelCase = do_normalize
__UpperCAmelCase = image_mean
__UpperCAmelCase = image_std
__UpperCAmelCase = do_pad
def __lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , __A , __A=False ):
if not batched:
__UpperCAmelCase = image_inputs[0]
if isinstance(__A , Image.Image ):
__UpperCAmelCase , __UpperCAmelCase = image.size
else:
__UpperCAmelCase , __UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
__UpperCAmelCase = int(self.size['shortest_edge'] * h / w )
__UpperCAmelCase = self.size['shortest_edge']
elif w > h:
__UpperCAmelCase = self.size['shortest_edge']
__UpperCAmelCase = int(self.size['shortest_edge'] * w / h )
else:
__UpperCAmelCase = self.size['shortest_edge']
__UpperCAmelCase = self.size['shortest_edge']
else:
__UpperCAmelCase = []
for image in image_inputs:
__UpperCAmelCase , __UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__UpperCAmelCase = max(__A , key=lambda __A : item[0] )[0]
__UpperCAmelCase = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
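        # Worked example (added for clarity): with shortest_edge = 18, a 30 (w) x 40 (h)
        # image keeps its aspect ratio: the short side becomes 18 and the long side
        # scales to int(18 * 40 / 30) = 24, i.e. (expected_height, expected_width) = (24, 18).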
@require_torch
@require_vision
class UpperCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
_A : Any = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
__UpperCAmelCase = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , 'image_mean' ) )
self.assertTrue(hasattr(__A , 'image_std' ) )
self.assertTrue(hasattr(__A , 'do_normalize' ) )
self.assertTrue(hasattr(__A , 'do_rescale' ) )
self.assertTrue(hasattr(__A , 'rescale_factor' ) )
self.assertTrue(hasattr(__A , 'do_resize' ) )
self.assertTrue(hasattr(__A , 'size' ) )
self.assertTrue(hasattr(__A , 'do_pad' ) )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad , __A )
__UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , __A )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(__A , batched=__A )
__UpperCAmelCase = image_processing(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase = image_processing(__A , return_tensors='pt' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase = image_processing(__A , return_tensors='pt' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
# prepare image and target
__UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__UpperCAmelCase = json.loads(f.read() )
__UpperCAmelCase = {'image_id': 39_769, 'annotations': target}
# encode them
__UpperCAmelCase = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__UpperCAmelCase = image_processing(images=__A , annotations=__A , return_tensors='pt' )
# verify pixel values
__UpperCAmelCase = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , __A )
__UpperCAmelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __A , atol=1E-4 ) )
# verify area
__UpperCAmelCase = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __A ) )
# verify boxes
__UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __A )
__UpperCAmelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __A , atol=1E-3 ) )
# verify image_id
__UpperCAmelCase = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __A ) )
# verify is_crowd
__UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __A ) )
# verify class_labels
__UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __A ) )
# verify orig_size
__UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __A ) )
# verify size
__UpperCAmelCase = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __A ) )
@slow
def __lowerCamelCase ( self ):
# prepare image, target and masks_path
__UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__UpperCAmelCase = json.loads(f.read() )
__UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
__UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__UpperCAmelCase = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__UpperCAmelCase = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors='pt' )
# verify pixel values
__UpperCAmelCase = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , __A )
__UpperCAmelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __A , atol=1E-4 ) )
# verify area
__UpperCAmelCase = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __A ) )
# verify boxes
__UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __A )
__UpperCAmelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __A , atol=1E-3 ) )
# verify image_id
__UpperCAmelCase = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __A ) )
# verify is_crowd
__UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __A ) )
# verify class_labels
__UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __A ) )
# verify masks
__UpperCAmelCase = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __A )
# verify orig_size
__UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __A ) )
# verify size
__UpperCAmelCase = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __A ) )
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def lowercase__ ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
a__ : Optional[Any] = hf_hub_url(repo_id=lowerCAmelCase__ , path=lowerCAmelCase__ , revision=lowerCAmelCase__ )
assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(lowerCAmelCase__ )}"
"""simple docstring"""
import datasets
from .evaluate import evaluate
__UpperCAmelCase = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
__UpperCAmelCase = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
__UpperCAmelCase = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
def UpperCAmelCase ( self : Any ) -> str:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
def UpperCAmelCase ( self : List[str] , a_ : str , a_ : Any ) -> List[Any]:
'''simple docstring'''
a__ : Tuple = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
a__ : List[Any] = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
a__ : Tuple = evaluate(dataset=a_ , predictions=a_ )
return score
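        # Note (added for clarity): the wrapping above reshapes the flat prediction and
        # reference dicts into the nested {"paragraphs": [{"qas": [...]}]} layout that
        # the bundled CUAD evaluate() script expects before computing the scores.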
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
a__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : bool , UpperCamelCase__ : str = None , UpperCamelCase__ : list = None):
'''simple docstring'''
snake_case__ = None
snake_case__ = os.path.abspath(os.path.join("""examples""" , """by_feature"""))
snake_case__ = os.path.abspath("""examples""")
for item in os.listdir(UpperCamelCase__):
if item not in EXCLUDE_EXAMPLES:
snake_case__ = os.path.join(UpperCamelCase__ , UpperCamelCase__)
if os.path.isfile(UpperCamelCase__) and ".py" in item_path:
with self.subTest(
tested_script=UpperCamelCase__ , feature_script=UpperCamelCase__ , tested_section="""main()""" if parser_only else """training_function()""" , ):
snake_case__ = compare_against_test(
os.path.join(UpperCamelCase__ , UpperCamelCase__) , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
snake_case__ = """\n""".join(UpperCamelCase__)
if special_strings is not None:
for string in special_strings:
snake_case__ = diff.replace(UpperCamelCase__ , """""")
self.assertEqual(UpperCamelCase__ , """""")
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
self.one_complete_example("""complete_nlp_example.py""" , UpperCamelCase__)
self.one_complete_example("""complete_nlp_example.py""" , UpperCamelCase__)
def __magic_name__ ( self : List[str]):
'''simple docstring'''
snake_case__ = os.path.abspath(os.path.join("""examples""" , """cv_example.py"""))
snake_case__ = [
""" """ * 1_6 + """{\n\n""",
""" """ * 2_0 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 2_0 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 2_0 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 2_0 + """\"epoch\": epoch,\n\n""",
""" """ * 1_6 + """},\n\n""",
""" """ * 1_6 + """step=epoch,\n""",
""" """ * 1_2,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
self.one_complete_example("""complete_cv_example.py""" , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Optional[int] = False
@classmethod
def __magic_name__ ( cls : Union[str, Any]):
'''simple docstring'''
super().setUpClass()
snake_case__ = tempfile.mkdtemp()
snake_case__ = os.path.join(cls._tmpdir , """default_config.yml""")
write_basic_config(save_location=cls.configPath)
snake_case__ = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def __magic_name__ ( cls : Optional[Any]):
'''simple docstring'''
super().tearDownClass()
shutil.rmtree(cls._tmpdir)
def __magic_name__ ( self : Dict):
'''simple docstring'''
snake_case__ = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""")))
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
snake_case__ = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
snake_case__ = run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""")))
def __magic_name__ ( self : int):
'''simple docstring'''
snake_case__ = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0')}
'''.split()
snake_case__ = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase__)
self.assertNotIn("""epoch 0:""" , UpperCamelCase__)
self.assertIn("""epoch 1:""" , UpperCamelCase__)
def __magic_name__ ( self : List[str]):
'''simple docstring'''
snake_case__ = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2')}
'''.split()
snake_case__ = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase__)
if torch.cuda.is_available():
snake_case__ = torch.cuda.device_count()
else:
snake_case__ = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , UpperCamelCase__)
self.assertIn("""epoch 1:""" , UpperCamelCase__)
else:
self.assertIn("""epoch 0:""" , UpperCamelCase__)
self.assertIn("""epoch 1:""" , UpperCamelCase__)
@slow
def __magic_name__ ( self : List[str]):
'''simple docstring'''
snake_case__ = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""}):
snake_case__ = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase__)
snake_case__ = re.findall("""({.+})""" , UpperCamelCase__)
snake_case__ = [r for r in results if """accuracy""" in r][-1]
snake_case__ = ast.literal_eval(UpperCamelCase__)
self.assertGreaterEqual(results["""accuracy"""] , 0.75)
def __magic_name__ ( self : int):
'''simple docstring'''
snake_case__ = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs)
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""})
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
snake_case__ = F'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """tracking""")))
def __magic_name__ ( self : List[str]):
'''simple docstring'''
snake_case__ = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs)
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
snake_case__ = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
a__ = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : List[str] = '''facebook/nllb-200-distilled-600M'''
_lowercase : List[Any] = (
'''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
'''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
'''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
_lowercase : Optional[int] = '''translator'''
_lowercase : Optional[Any] = AutoTokenizer
_lowercase : Dict = AutoModelForSeqaSeqLM
_lowercase : List[str] = LANGUAGE_CODES
_lowercase : Optional[Any] = ['''text''', '''text''', '''text''']
_lowercase : Tuple = ['''text''']
def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''')
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''')
snake_case__ = self.lang_to_code[src_lang]
snake_case__ = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
UpperCamelCase__ , return_tensors="""pt""" , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__)
def __magic_name__ ( self : Dict , UpperCamelCase__ : Dict):
'''simple docstring'''
return self.model.generate(**UpperCamelCase__)
def __magic_name__ ( self : List[str] , UpperCamelCase__ : Dict):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=UpperCamelCase__)
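    # Hedged usage sketch (added; the instance name and call pattern are assumptions
    # based on the PipelineTool interface, not taken from this file):
    #     translator = TranslationTool()
    #     translator("Bonjour le monde", src_lang="French", tgt_lang="English")
    # A PipelineTool call runs encode -> forward -> decode in that order.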
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
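
# Worked example (added for clarity): for "59", the second-to-last digit 5
# doubles to 10, whose digit sum is 1 + 0 = 1; adding the untouched last digit
# 9 gives a total of 10, and 10 % 10 == 0, so "59" passes the Luhn check.
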
def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""MobileViTFeatureExtractor"""]
__UpperCAmelCase = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCamelCase_ :
def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=30 , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=32 , lowerCamelCase_=2 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=10 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=None , lowerCamelCase_=2 , ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
_UpperCamelCase = (image_size // patch_size) ** 2
_UpperCamelCase = num_patches + 2
def lowercase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = TFDeiTModel(config=lowerCamelCase_ )
_UpperCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase = TFDeiTForMaskedImageModeling(config=lowerCamelCase_ )
_UpperCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase = 1
_UpperCamelCase = TFDeiTForMaskedImageModeling(lowerCamelCase_ )
_UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase = self.type_sequence_label_size
_UpperCamelCase = TFDeiTForImageClassification(lowerCamelCase_ )
_UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase = 1
_UpperCamelCase = TFDeiTForImageClassification(lowerCamelCase_ )
_UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class lowerCamelCase_ ( lowercase , lowercase , unittest.TestCase ):
__lowercase : Tuple = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__lowercase : Union[str, Any] = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__lowercase : List[Any] = False
__lowercase : Optional[int] = False
__lowercase : Union[str, Any] = False
__lowercase : Dict = False
def lowercase ( self ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = TFDeiTModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
pass
def lowercase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Dense ) )
def lowercase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCamelCase_ )
_UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def lowercase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def lowercase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ) -> Dict:
"""simple docstring"""
_UpperCamelCase = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowercase ( self ) -> List[str]:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase ( self ) -> str:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def lowercase ( self ) -> str:
"""simple docstring"""
_UpperCamelCase = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="tf" )
# forward pass
_UpperCamelCase = model(**lowerCamelCase_ )
# verify the logits
_UpperCamelCase = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
_UpperCamelCase = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( lowercase ):
__lowercase : str = (KDPMaDiscreteScheduler,)
__lowercase : List[Any] = 10
def lowercase ( self , **lowerCamelCase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = {
"num_train_timesteps": 11_00,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowerCamelCase_ )
return config
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase_ , beta_end=lowerCamelCase_ )
def lowercase ( self ) -> List[str]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase_ )
def lowercase ( self ) -> Any:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type="v_prediction" )
_UpperCamelCase = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = model(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(lowerCamelCase_ ) )
_UpperCamelCase = torch.mean(torch.abs(lowerCamelCase_ ) )
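        # Note (added for clarity): the loop above follows the standard diffusers
        # sampling pattern: scale_model_input -> model forward -> scheduler.step,
        # carrying prev_sample into the next iteration.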
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34E-07 ) < 1E-2
assert abs(result_mean.item() - 6.11_12E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72E-07 ) < 1E-2
assert abs(result_mean.item() - 0.00_02 ) < 1E-3
def lowercase ( self ) -> int:
"""simple docstring"""
if torch_device == "mps":
return
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = model(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(lowerCamelCase_ ) )
_UpperCamelCase = torch.mean(torch.abs(lowerCamelCase_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
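
# The three full-loop tests above share one sampling pattern. As a standalone
# sketch (an assumption: this suite targets a k-diffusion style scheduler from
# `diffusers`, e.g. KDPM2DiscreteScheduler, which matches the config defaults
# and expected sums above):
#
#     scheduler = scheduler_class(**scheduler_config)
#     scheduler.set_timesteps(num_inference_steps)
#     sample = initial_noise * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_output = model(scheduler.scale_model_input(sample, t), t)
#         sample = scheduler.step(model_output, t, sample).prev_sample
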
"""
Project Euler Problem 8: https://projecteuler.net/problem=8

Find the thirteen adjacent digits in the 1000-digit number N below that have
the greatest product.
"""
import sys

N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """
    Returns the product of all of the digits in the string s.

    >>> str_eval("987654321")
    362880
    """
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """
    Find the thirteen adjacent digits in n that have the greatest product,
    using a sliding window of length 13.
    """
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
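
# Quick sanity check of the helper above (a sketch, not part of the original
# solution): the product of the digits of "9989" is 9 * 9 * 8 * 9 = 5832.
if __name__ == "__main__":
    assert str_eval("9989") == 5832
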
"""
Project Euler Problem 74: https://projecteuler.net/problem=74

Count the number of digit-factorial chains, with a starting number below one
million, that contain exactly sixty non-repeating terms.
"""
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """
    Function to perform the sum of the factorial of all the digits in number.
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """
    Returns the number of numbers below number_limit whose digit-factorial
    chain contains exactly chain_length non-repeating elements.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
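
# Illustrative check (a sketch, not part of the original solution): 145 is the
# classic fixed point of the digit-factorial map, since
# 1! + 4! + 5! = 1 + 24 + 120 = 145, so its chain has exactly one
# non-repeating element.
if __name__ == "__main__":
    assert digit_factorial_sum(145) == 145
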
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    "HTML code for a progress bar `value`/`total` with `prefix`, a `label` on the right and a `width`."
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    """
    A progress bar for display in a notebook.
    """

    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        """
        The main method to update the progress bar to `value`.
        """
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return

        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    """
    An object tracking the updates of an ongoing training with progress bars and a table reporting metrics.
    """

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        """
        Write the values in the inner table.
        """
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """
    A [`TrainerCallback`] that displays the progress of training or evaluation, optimized for Jupyter Notebooks.
    """

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
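
# Hand-driven usage sketch (an assumption: this is run inside a Jupyter
# notebook, where the IPython display calls render; in practice the Trainer
# drives these bars automatically through NotebookProgressCallback).
if __name__ == "__main__":
    bar = NotebookProgressBar(100, prefix="Demo")
    for step in range(1, 101):
        time.sleep(0.01)
        bar.update(step, comment=f"step {step}")
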
import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    """
    Test cases for knapsack
    """

    def test_sorted(self):
        """
        kp.calc_profit takes the required arguments (profit, weight, max_weight)
        and returns whether the answer matches the expected one
        """
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """Returns ValueError for any negative max_weight value."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """Returns ValueError for any negative weight value."""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """Returns ValueError for any negative profit value."""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """Returns ValueError for a zero max_weight value."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """Returns IndexError if the profit and weight lists have different lengths."""
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
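
# Usage sketch for the module under test (assuming the repository's
# knapsack/greedy_knapsack.py that this suite imports):
#
#     from knapsack import greedy_knapsack as kp
#     kp.calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100)  # -> 210
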
"""simple docstring"""
def lowerCamelCase (a_ :str , a_ :List[Any] , a_ :str , a_ :Union[str, Any]) -> Any:
if height >= 1:
move_tower(height - 1 , a_ , a_ , a_)
move_disk(a_ , a_)
move_tower(height - 1 , a_ , a_ , a_)
def lowerCamelCase (a_ :List[Any] , a_ :Tuple) -> str:
print('''moving disk from''' , a_ , '''to''' , a_)
def lowerCamelCase () -> Union[str, Any]:
lowercase :Optional[int] = int(input('''Height of hanoi: ''').strip())
move_tower(a_ , '''A''' , '''B''' , '''C''')
if __name__ == "__main__":
main()
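
# Worked example (a sketch): move_tower(2, "A", "B", "C") performs the classic
# 2**2 - 1 = 3 moves:
#
#     moving disk from A to C
#     moving disk from A to B
#     moving disk from C to B
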
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        zero_shot_classifier.model.config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''', candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=True)
self.assertEqual(
            nested_simplify(outputs), {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
            "scores": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''', candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=True)
self.assertEqual(
            nested_simplify(outputs), {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
            "scores": [0.817, 0.713, 0.018, 0.018],
} , )
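
# Direct usage sketch of the pipeline exercised above (using the same
# "roberta-large-mnli" checkpoint as the slow tests):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
#     classifier(
#         "Who are you voting for in 2020?",
#         candidate_labels=["politics", "public health", "science"],
#     )
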
import inspect
import unittest

from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV1 does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
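
# Standalone inference sketch mirroring the integration test above (same
# "google/mobilenet_v1_1.0_224" checkpoint; `image` is any PIL image):
#
#     processor = MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits  # shape (1, 1001)
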
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """
    Copy/paste/tweak the original model's weights to our GLPN structure.
    """

    # load GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
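
# Example invocation (a sketch; the script filename and checkpoint path are
# hypothetical, while the flags are the ones defined above):
#
#     python convert_glpn_to_pytorch.py \
#         --checkpoint_path ./glpn_kitti.pth \
#         --pytorch_dump_folder_path ./glpn-kitti \
#         --model_name glpn-kitti \
#         --push_to_hub
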
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], False)
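
# Usage sketch of the module under test (assuming the minhash_deduplication
# module imported above):
#
#     ds = get_dataset()
#     ds_dedup, clusters = deduplicate_dataset(ds)
#     # ds_dedup drops near-duplicate files; `clusters` describes each group.
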
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """
        The coefficients should be in order of degree, from smallest to largest.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        """
        Polynomial addition
        """
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        """
        Polynomial subtraction
        """
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        """
        Polynomial negation
        """
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        """
        Polynomial multiplication
        """
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_2.coefficients[j]
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """
        Evaluates the polynomial at x.
        """
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        """
        Returns the derivative of the polynomial.
        """
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        """
        Returns the integral of the polynomial with the given integration constant.
        """
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
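
# Usage sketch (not part of the original module): build p(x) = 3x^2 + 2x + 1,
# with coefficients listed from lowest to highest degree, and exercise the
# operations defined above.
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])
    print(p)               # 3x^2 + 2x + 1
    print(p.evaluate(2))   # 3*4 + 2*2 + 1 = 17
    print(p.derivative())  # 6x + 2
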